[Unreadable binary artifact: POSIX tar archive containing gzip-compressed data; the raw bytes are not reproducible as text. Member listing recovered from the tar headers:]

var/home/core/zuul-output/
var/home/core/zuul-output/logs/
var/home/core/zuul-output/logs/kubelet.log.gz  (gzip-compressed kubelet log; binary contents omitted)
u|/iN G'2J`V&eĠLFMi\,|Y; 1.҂`ؾ`gb*C Rʹ>I8{ym},{ E]+9H"#j1[ v:uG= v7mR@E6j>d$}mF mF~⮠wҘY`'JUAH0J*9~#u(J9QHSIydYXZ6~YtCmp>ze );^NeiK]Am6d8S* #mxpg؛/3AމXf_.6}'M+*xIbHF /5.dah5ꐞq<&qȕI|C1-N"=4 T.-L8!D)"Kx,i녵VB]V7ФcXv;V^{?){ ZAJPs:"pnΞ[{:=t T\bZ㩈4/>) t)0Gq]C "1C5ߝ{E]J!RΩp 1P s]q5DDYџ 璂ߝCA-q(⽀Hj| eSS􉵌s`*R8)^+g!hJS`JW5!ĝ_f}˴9:UF`<[ ,}u/ia 4qRq%AצM^XjgXMԱNX!)YELiZ6^['t 0%.s+V9mֳ {7<ڽVH&ծlCg_(=6 7zl, &N@Tyl+?>c]OoRAɞE.5w@E)pͲcV:: !eyH%9ZZh)?=ePV*j2qnAV@ 25% .dF~s^+7޳-X^jw  [U%[~#T({fӳ"[;[$ӆ Yb,yV  Fvz6_<}v ;9^ 5#J$.l*LX! ۠T/ Woǐwm^XʡFCjAGaގpaq—4$2A5"͒ !z+@%82<̪~.)-(Nƃ|v)98c9FrgYٜxT"XG(ΞV$:gր\,&f0Y&W?$Y@wQrZ;XO=DtjDy8Aq)&2xyrIRi$3!RʴKipLfвzj1T Dt1Dw"(y@Y۲}Ֆ-ӵ8"X **l)Y:D57҆{CR2T{ўA 'L$UPD^τs`V;4SF4T֥g@0 LSI:WovBT[Vk V봧B(Sa }f_%7_}nZԯަ $l7`Pvbir{}/3ݖ5Yv# ӂ}) lg bWXKŷcgYk`%5Y& d,XJnP\%kZ⿖P Z⿲BsVg FkC,{ɬFKT чf -5 |%PA kh*zhZZ0 ʡvkZ` |CnaRE737~q3 3&-%naӸfn(H/n#'2 nA YEѼhf )oVKi7 9nf}MoZ%]LWO~k֚Y2 3fKS,oNz {᲌'.hPB_˗/>oWL DY-#PN5BEAhkso|\#JjެI&!t®dUvgm](w!7#@ptd;mGʥ9_}MsP;LM]]V7f= $x )BCVoN0 U%*+w~%|qF$;9Ǒ2'ZB0^:Y!UeVĮoWW!|y&z*6T#c$LT7%# Թf.wNzz?Dmt:w1^j.%; 2vN-t.52M|N8.ϗ%+T=9R'ţ~&&u2Q|2K;^GӖdtpl;[ez7TEێ{? 'hǥCb@"G\wπ=:QIy!]NSrw.$tҊVȪhŮb5r@$LDF+YcpNɤEV+Szt)la :hd?M~ϴNbJ8]} vEUugOj`?!Vxx#H#Q|TX3A-c!Oy& of%x'4٢xQTQm7jyݰ8 .ΈV4Ho &H}|\÷r˝ JpM>Nb'5/M3o>ZFh5=@*@4Vwf;Yz4-p.x"eIJd3@4`*%<ըDHimCD&y>t;|:x%21NR<5<"<$7+60weq$4]S+XS,٤M#c#nVwW3(S/282.V B&V0OK4AL:fŸјD^zꞈl h2lcU˳e٥{꒏'1jsuk6Z$w8J5}kWmJRfQnlv ςa1ŭJ C]6 h *H2|UBG^ /8'|#255$4% G2 .xͲJn-} m" 5Nuٷ*'}f,dkq"ꫬ6TXKF.4R. 7ʁ8'H"XJ+ Zܤ*ܓ 律g @lRj͒#:[7|\pgdlHQST,9ך8olY*|l_ń+` |c>V $nF@/HJZ4h g &Ef*8AgVUKum(p i%Nl.EXB(]J70xɏXs /{{s0ț1\~UU Qx&Wk>W lB)AchrEb!y^6FT,CZv|2t{"ג O Sx/M BbS چe>yK@Pp VU*Tkx=$DcWs! x !l%9*Iq}DO6x:FQ.9Sr⸷d#.{/]V0V\J;<-&;p EK `] )YZbU0k1Dc EmgF! ܮ-p}`]V"-!͌\ohCVi"ċ>dZWUx },cp6*Ψz.Q.gdӜCdG>]ˢjtggh6yψ>AҷEY:%ezt>,FX *|@ V\gF ᪠m}쬹 c:h3-oYj|˱YEn@9b(Iɨ&Fy!z ^=>}!>k{a4h %R3$6fAZu0,J^jpojX"GJM@I‡W*D ]W/Eȵ嗳څg4FR8%Ae@QGv¥w@{ ={~|5|mfWpσ""҉EςJEJ.AFQH$*G]q<ٕSMhL]P ͷ\B@`]JxVfEF(7ogxK <{;>靽^ٶ ݾd4>{w]tE׭q"}sΖ[CQUh7a*.o:`ɗa}VaMYZ8yJ?7,d-ӷ}x ZfgK#2w]FH* dAAR iATTjJj^zBo1WӫˍZ>\_Bejc:.0&׷ 4r9~/hEd벓< ~6orkMkQ ]ۋ'~=uiCO~&*MΔ49dJ-$ !PT! \LlS)>YgNԖNڟƳ; ϣg$Dhӈ4V)f]mra{`RFҞ8υ*楉)4H",W AHHECq-834Zg١/m%%]Y 1n{`ܫȤDҠ YLkE>2Q x-DeLyVd gGy \RAj3I\Đzc ǎ<2P<p0#xbT 0d1hȩW ,@rrBͽR6B$I"\/$/&0pس~aR.,)P չSʱ/`dhAڕ4 `)_聯{` 6B$^OD,c`jb?K~ȗ/Geٚ2@Ldf:ӀFK R_/Y ؏ 휎8f1 k]cĴ3˜`H1shi ȣV>=:#EgQ*L^2MZ 'm%:Nh4czn0$RtXTGHډlf /If[rn[q[-'c 8H^?ܤu˷4IJmDw?M>=C0Yl˸fS–G!\p%c$%9KXRVtn.OTne͍wWϏsN×E?O~(N'|٤h"arA9tw*&\5jSRۉ8FB'~k!o) G-.嗓5-w}I ~rW|e#'|œ8\)F#$jo0COٲ2簤ͨMM*2$xX2Ll>#`&Hqi6 eY,}lv):Vgk{{{o`tw7k#00܀@ !#sJqC`4"*a`r e5o8?r_mZ6/yN6s5N?S}J!u6|b#oRr6@N4_B57:@JWfKGMd:柽7uu8Gr8 9=2c2 Q~U=Zzp n2rIK%{ahA3xN1^RB4dzBȥ4H <2>O_܅Fu˷FQ/;HF[~XٟBaww!qN1NltH=%hcʤ:`A E#*07GAZhi4h*B^ *C&g²3DrfQte$ V.DrΌ2y4 m9~cppq|:$P[պ >(+쉙x'2aWakHVqW[bfG17y ^3+hx/N:oJpJXh1kvϑ@A1ێ,eeP,¢LR*[L>_ (\/TI0{닾q.PI7%)ܓ}ǵ#,% d&Ni_Z8 Np`\v .4DTzd6 ` 'GCѣ_dCpuκL/4jHsqXz*Rusb)`[ fҗt{^ñv?^%y&t],۽ +_Oʺf.lٖ YM`)uR86.6(%2U)OruStc]7U Fz nK4үS(7~#F7TD"Iy[C#liIZf דӥ'6iuːxUwBPvӒ.&gWKqowpwwDkLU|X%c9 >6rHߦs08gJFxH|&`DXed, l"G~Z+Rӆ|rqPp9m78It'{XgoecsCʚrޥoSuQ80L!ě}>:0*H"i`X $3%eFepM%72¹Q5;!uqS9S\&VRYYȸ);NQU=`stwX] 0M_Kl_kM !G"M9at‘L H ?{W$7_c~[}O1?t>uy*IL%%RJ1%I @?([F#!L1CbU#CRTk!Tq´@ 11ȍlV'm> Ēa %}V,,U0hW`:6T\8H[ds[0l5̘Lee9$k/;j 4?fz;1쑷[^՗CnK2nR mKJ_B1&i[< }G;E6Jߊ~j႘9"vG3[JlfTщ`22YŤbFPx1mX@@ŀY*-$>Qh2FI]%3 =oڥ:ëRY~5,5%^V""jq:NNcQ^5pR 2Ў/a3޷9ad9Z*>{L398X6WkBJ1vݨ}#2vG:@wf闺1l^vuv!pZŸslsy\!LJ"M_ur\nzc?U /$QA9= 4]:k97iy039or̞`[3R``x،`W~e2ߜ-')V!=]#Gdܘ2cN8.qz1!Ј[XtnWjo7"! 
8(ٽ7ʌYII"IjEI@ϸ$uEF`P>5JY4ڤջTB ^[xGN9J(;S;t0J!ҰYs1&b2\rg ~@,sR7qɽ\M1]3wG5/I%f-YEȹцcb*ӡ\8YM٥XEϸ|E M2lޙbYUb4Bnrjp j! 7-^,E ]{=c xqs}u6[Av"%h#wnl'6a"V=ڻx$Wؿ&r'8 '@TnU¿?Ķ1VfMй2ZS6hlaŘ%o:(BO^?> *bX aj;IΩh)i54O?|#o'h~I~X88AKHgg=ɓw/Z @8&mx/Ԯ^s1Pq%Pz;̊q4݆70a)Huϛs;$jnw"(Ŭ"ܴ왹$̬RײJiNF4<`w6ݏ',(!iEP!|<:2w $U4qZJ|}P5o5go$i k]M:Wr}[^g5_˷n~%b+& G}a!daj(iz:dӇ7oU$^7NYn;̂qzw"(~Xem{'YSjskbR~ NkaۆX.*+M*8ђ׶j؈e/Mez܃F@ga9YV8y]T`WրqXWHȄ!lpiVrj7g5.,ƺ.D߿[02(٤j|];/_?nήFF{E~ G O2U)S.zC7E!|8j#uEwu`$p xHd>:'G97ܫ>eJdQb ߣ:ҕk|mOvZP{@yNsA'tkfz1`j/By#lAZwNZn ~Cѹڵ}W;_K^EeML~x+Sml]p[8YcK*8YW YU wPI#(w 㤪/@/Hxe(z4LAuY7-Sa}}Ks/RLƛ hGqm5UW~uӋeTײMSZ֍#;"uk[PCALCTp4U%ruۆv\E*lܷMWN55(yqUHKwO jXDz}'X0Uqgz08"&Uh$BynZDw tW:.W,_o?H c}KTc-'QhsjZk0jݓ/b,Bbi/<5S3ۄuY!꽑P5H3ؐw"I03I >P~NTڍ2x]<fƈǍ% 4b]#s#XRQjNCN^}m䓩=b898$n.4tmjUZlK5'7JT@ /cgJ YT\@3o0Zfrja)8x Dgla_u\,L + r|wL TPD6MSwV[bviV,G\#mQ3 %fL.LJT@QQ1TbHy FSF_A5O֎(H "@;%CZ Oгc2H" 0*g3er"vOئT`U2~RgN֯*zg)ƢNoUWc6[۾403S&T|J'&s6 0fXKsܘ<ޝ+6p/s}=W;TPg2m_/7s=I kʮzˍcsҢ1`.Ę\'\PVSF D9T9|!j7V ')e$s݃ cZ\Li'fI p1sZ&z?SKbwuR4_ Aoƭl'4uNaB%VDAƄW\Ove$o~^ Y7.MBv)+!+Ew_pmVߪg>E0)HNIrs֛bG]g /sw'SdR-L4w"W,3j&IExU%`.f_a:u4ιhy 2 OL1̄4ASawN]&vG zT~άR7|3?yUOIL3: 0CCsnxŘ ͺIqx. ZV>fY5eɭRk,ÉȸiM 0lm1 綘6<¯ݻsQmo}Bىf:tJkd1$=vyk?hmXx|6CPK@sLl8 cEk;IKcbNl&,Pi!PPi쉎hG$0m+. 1cr fHm ?P9hٮoimdc"?i3?a%sʚJ:09Q&l &Uo8z)+d,IkJ2jGFv8[Ų2A̕C]m$FQU})1a&6֌`K`cgD'3G^|jer'ɟO+=oul@XkI?JzK: ^>.sIkY!`rp^X623TxTquGe<z Ub=X)1,qSy$l8HnboBr/3dsnݭqyl8[tsh)['S Z9P`Bӌ+IZ X%~U$0ڸLCxUmg;gLhkNh7fބB@0'RtFSŰ6n#K!>^'Ý6}};7uuigOP{\.APV?+k`wFOn3WP}05t28%SP3\*zQhg vnWnVuqV̋W5!_ZٮzSwE լ_]V?[c3[P_iaf H`ח =)lcY_fF"}~H>_ٺMy^Ұ~iؓhmWo5Lz꿖Z'=g S7TMQ׸Ⱦ3U1R߁G['f"[jMDCH)LYJB`dsn)Xj|_SX~|鮅4_vәSrx0y뤸)ez͋R;@WObqTFS2:ۖfY)'Y,~[,m6~3m~lwmgPZM\}E^?'Sfed.,l?gω_}n!u 0n]꧅%l]|ʪ{Cg7 _U>EO`j\r7Ypk7˭_.4lnn}PoI|-d軫~pia͆gaw-]Ua?r>_. }MB?X oqWmӶnPXl; %?nvm<2x%ݎVÆ̩RbH f.&2"PPWc(3qfuLX?`b*;S~Za*)e2j堠%ȸA&,YF{v0 &}D 97`7-2I&`*#c\)b{7r 1}=ȅA^ݳ!`W! #6C<չHV7 ``Vb *1NTX$%$'A-9o.8˩(`gY!BqSLZi C+*q^(m\5΀5wsWߔiJ t3k~+6wL7mz(i+^܄1*ٽ 2VX)(R*ugW p5žGQLP n*CTRk}[hbP]-}b|%6@Pެŀͽ- Ĵ`Z1)VB1瀾Hì#5k-'ů^zo9Dԙ1A@eZ(k ǽv0j[Odet&smiqmK{0#+9<շ3[DcM!,@NҵʬSB\-k5FC_(HڪuK:}Ǵij^$q .*dT \ $֦n *=imhVD$DO+#'q.F &vS+@88 }aBmٲU #a̸=mK.U/L8~<ϕጣ,6,@!J鍲$/,cq;V8I[-rsr#*xa/C~AGD;'g%£wPbkK+[U`LK\C#!Py.*0l"TR|at>i:+/2R^Uԡ,x1)>.|ME(7qe- aIfJg4k0uMRG4X֟BG1ar|ST^Po"s$r 9c}D P )I+y6Dt3yoa)'+JR2m kúkq3#7( w_'u._=AQ;`ڡuy)6*x;|rP0 ҆)>.{UqU7j>ZJJt 5 |yz6k< kz2A+15wҮZ蔡#Xo,@2w H\˿3I5d!ܿ&9qē΋п;1}ΉL!ʬD'HYFŢ*Cpu6{+$ FPg/zҳhy .6x]zAU3Esx[M!@z9ǁ$CFb2݈mtkQ y3 Źg\lmoOf|.H}|V{Fzty1V_r3V=`H+XIp$=ah]{]1voH1x,C#xH*M$퉄muh1DLJdo2q%^}`|AUGop"`6^)9ߦ@&TMr ;YJKyp\%\b Q\AD{uY87(A1J V_L$8xBfIoRDhImE,Kh-{zD?L•ٙN VqH-B)EXcM)쵮9!Ik轢G0"9sF0y=4(i4r#Zo )RGBgh!cu[οy'tmS%U$NW;;GlfiŌVꣻ Vh ߯cQvTal>] X" ϐܟJ 4U*YpA? 5D ۃ9 /.QëO q>J ۃ%6^wNw$*()2MV!|&a{d{cJl!e+k#0_d.E%ԀyM TˑI$޴P%llr ֺ)`~7BxbݯFDh_D"*=] #zQ:GFJ$6|rsKD$=8VěN,$ Pm2}jRt}~{!SyaU iB_B3( eT V)ݑg[\G(q6xB53| |fBf8BA0E8T 'AX{[=:Fu6|O0QGMWlޒO#`%|Xz$i(][\Zkr\=x\@iSDD$u@{^W0G7uK@;rDqrG-uyyyt7wH^(L}[B2蒓PE+).N%}˄}Up}]nWw=nHr*+ w]侴Dux퍥&9Pe٦lZ/M˥pR(2:% M,(QU&MaAX$FHNo5M yx]?`(k4\}Wl4w&h7dr`6 FPERKcv"x7:f. .n\e7?~X^6g뙅)JoVtw}}}vkJm7/+by= S41 ̳El\߫/w&,ٛR40jfygA]ű.ʷs=-_ގoìۅ=}0G-@-gø8C,r{^(fS{Vֳw~pϣr/FڲM5i*Mx1>/_6܁(3.f,_^gq1(893aa ':pW ř"I9D \y>x A/daxo gcQ=H5.42 V,bšn.6'A -Z\h ;}[#/¨. 
&kt}vq) TFWSР`fiz?|BLlInf_X3T~yU؎*`_/@7eyW\^\ ,^/T/`P\؁212mziUm s[;(nԹ<Nٴʬ&]NXҟ>-1*Ğ`{+-y/1ߗӞgs^6vk#fiaeԛ,|H.83/h1cHZ[hd&6S0UM:mHŇS*^ DB+y/X\F 3#Xhő$$F:j >SD9}!"NSMtj.T: XLJMyǛʰyȮq")[{*,Rag.Iv;Ϥ?BQJ?)~G=Ok' vث"i}Y-»;yt6* \,d٫m5Ɛ>.?6ەt2וy l|^wWg|hDT$5SnLD!DdթZh}gAv:&R4E\6qWy.w'Jy4%%x}*fᥑ׍h $ZhDMƴK!gNFA)&LOe"tWZzǂ^>Y.A^h(Ǝk}d0x)(1p>>oizw}\j Eyȥ He:(,%",FR4K+Whۈ R&" OnqȇfϺY^ ^mkǩm>گɠEo"n{ 1̾GLT[׾ PU6*nw̃{Aw˻6 wc<@Km@K✠\_9T߫&_ 㕧BcaZRŇlΆ$fbcL} X{Ea}4;r{Dؠsa+`Z, 5>b |GʍDJU5\<pN }Bd k)&"HГ>Sg/צ#5>0lc#NvOb."L۫\~uS'e9Z ,D !'?'^)=WA/@fWů/>,Qe ]F2k/c#I'6NYFHX B;X $ʯ>)5zPw9#^Fv}c@+8ŚDbL \8C 媈YNWjJq>þ|Oi/@/,GWg셱X e0{SZ7uZنmUE3Lٯs4=Di+BB% Xs4;Oi[*Pjzm3P# i(ݫׂC e( ޲!P*} ezuA0']cDOY8clH!y,ƁD.UHGFBnO,{a.;#"s^|q$yn)IPDiG*c42:ci.nVF]ucH!y.XƢ?OeCcdS}P@jjKc Pؾ6 D1eT!ל#5c+D: ~6OA0 QBZ?wOAH!Ur LKκsD zIj}g׈ݻb'ТbwQ}J PjE|Hn<_kETS#Jf<2nșTE`I\c١%uҨm$| XC`VB}*_ Fѩds/)Eeʐ*2eP8ZhԲ1rM֘2sX6mFM>ݼ l1isH""4G"j߬])aUD()YИFYo}CX=hCa# bP;=q*,LDګv}{>zMP!{:ga`2b{בL^1}JczU3(_f3B_!fShFؚb_)$S! ! q%3uHHuhM\ /*N =ܩ+'^8 7ffqn΂6G&:m.~6PP=[5>C j$y l'@%,"W*k :/@huJQ0Lu/vB9\#ڊ_oDFajAg@scIpBxþQG@Obվ փϳ+weU,b:+kg6O1]>t>^>x 1P$`{JqaŇSZWk?W<7:t(Κwy͛CI␅6( Ě`UxB⠽.t1 yؘp!c9aM!'1VU+]Ȑςf"pY*cTi"4a6;'Y|Y^ȝȵz]__ݚRۅeu~UaHqT$5 3ؒ Q:{ōƌ_&F9W29G(#s1װc`r͒ũrcn% $I OOJUG9D$F. SmGc[m(|6 М|֨!hjx7gQ# ."xE?3C"cmu탧j‹@-> < i뺚KWOt bggʫt:zm3C b_kX9~*= }AwXf .w%w-BU3O.1p1ovL+z~B3_~ksS֑eP)޴&z0NGǏ2ݣ%'xz(d=1T?~䏊~f: "[@; WM{/Go{#=Gh+7K*QhuqFh$ϛRσqt^"JZ\йt<*UJT+TRjkr%G#;BH×{K@יEFYekEôDA[ 6Tj R(1J]Zj{}xz9Ro={HQ+9{u|ZGEHpPRc-4P$Ie;>_]VKwnӂ*G;-h(n}M=]V[jRp; )W/;1Yk[V=ZCji&F9.L9mf:fO}͞0;eӊ6{>KT,˔M I&!c"i1hDグ2&e#gv0XuLz$P޺VNIM^9x\j&PPSkֱU}RX@= jtP DԵ6&ڈ{`R= C'pWO*^~pżTk$PFȠ8'S1\:=bDBrr4t E*E:Y` N:b$vfYਲ਼`B߮CugK?|62PoxZ 3`L[ *#ρFضћH'k#]7)u9& )sCNLf3F{MH14iܗmೢ+=](F|tx&M ,C!'BF^ĠMk/=]=]\~/#[5 ϿDrijxPM&0H|*ѰHs^F"#KM*I}BJW=Ow5@xMu>S٩l8Fmc}<^4nC{}~7N҇20^Lj ?v00>kiY3~\T55l~jS1|ʏR/S٤}hws˹Y\n_SRHw(ab`ɸ1'5^żY_JfS.KI1i$>H/EƟ f"]X%ci@̀IbhyH;?\<5;hlubAu~:[5a\AE17XTY_&)=D49^R?2q1ۍΫ3>tWLB@5Zڅl;ZktX2+'8=};&C2ц>˃9 iO}:Zf=Gdp|6Z<#%gpsdQ m22nɲDP;jl,%гCג8ɳ9$&^![] 5x%F͗!_k tlz9?Jb-|L5k˭#N`:ԫt`y|+v/^TqޘRpUc ЭbLaSxDIw<2_WF48;_kD Qq!P;'51d*l̗6X*)m|NZywo7]ooKO޾ϯFѥ܏G3>1\"/wūw㋻vm.MX7`d3ѝ@?]w-Wpq#qMxմq*(CenQdjiEc5\Gܦ8So} h `iu<%(<Sc8_aˌKcqr+Oq㍆l1HwG`}Awo3C Z2ȵ(n<.;^h⩃ %3Dl++;@Aq] seЄq>>ȀCLR7υH?Ҟ Qxk?nDw#̀^# oH8K Y"aN2z'Љ`,Dg1L$YٲCT(ZCNƺ,I zV&Hvz#  C}ڻL&AOke)D!y0 Zz:bQ6e]n)8H=l2KIǨ]D `,|0 >X{$9!G@X !*1s2rMMfly` F1*_:Xj]{X#!d!֘?^yWhWq/"*KVQ,1JՆ4'DÔꑐ7$mNQAxص >'M }C=GpzЄWmwmI4U`,`'؁7p2`6AOID$%oSHl7Kut*?5LQkf 7J)q"f^zupz d-YHB;&GSPOFXX;߱ fH4ap3>9%)FXn-!Cy :1"_ %YZT<`沿 ȅ 2h ArTe!MTX2+*:'8g 8MsSWQc#؏$V=~&93bsJs?~* 釿}_>Cn]aEIoS8JA/x=8:;IT8QpGe7?|NO:"ηDsEռ-ˡ@"M0i&B'd).boޅKޱ.GB!Bk3-zSoyLb%<=Lx7jΝ֯Js՚X388>ޥψ|"hgkMhrB50AEJ%^Fg*mEV%$RJN$EXR(aj} d_qvXmvboz}h[_-PXks=g2 "Hi/i>eLxS3BnS1׃5W!M=zt]ykG۫h=vM{8{lnX뱶*_o@4\$RhQc"x-&X*Yb.ʅ8VO sl1|Q*y-U"4;׋owMC>;ϣp*Zzv]peY'з*88WMb 9dž A IHvc?dei (&dm*^uH=H[@͖-[5t c\ r|I\&[wIgy%ŝ )hh ^bL-mZ5;b#Y$Pn{顰c0&Zj9V9i ֪MN<ZhL!28 !(hhi9GyD*"lJA&P"3ԭb Z6` idKV߹E<)^H^"9@&FjRosi{7={̏Ifc> t.هZhJ` J֚hJkM.CD@B,)̃(4ҹ]=s7DkV#jsނW\`lGi)fT˳&%m-[P67e_G$o0*k?^ۂW-Y^^,!n^qTgu06R&=1M5S-ULT, ,<]-bcP J?<}B H Ix!TęʷhDkJݷ\izF` ,![v$r{&h>R QRoA hW 8}JM|+y<\y:g`:B![gRg~wyeu Km2M[tꏇiίukaL02'-4Mjr; V[8Ċ"IV3gL=Ú74"EtݾS;W*QTA@;>9i1č[R;g| jiRU4'v "-j3;ss1^DS\MU'j)TL`J2-'ncp8jrDN,ݶ+R &^E!&W*Zlچ0y/kJU .obpMlV[ίI1 8$ ob9P0& ۯ׷8 !e"2d"z>c];J{G@b,.éoyh Є`(NΚĮfZK "aiW넡{VI雀ZM S+Ω$ǧ015 \&5_AoΧh H ̑ ;@cndhln z%YsL͡^ &cڶ7#ݺl2 cnkcצ WSes<8ӟkk=Z-#F*AqRV;߈oЧ}=I@zglr5>|:?wZU{P'!Ķ/>9*|{渧w\xc@(ijX6`vI ܊DtKnv|SyH@:$ WW߀1h ?m#Hyq8S&LjXnQ}Hv/#^s.y83hύV^erqm-w[Z;fȦPI -nj TWOBַ^Y=Asс$K|%U .qVr2ްMvA 
,Х9JxGͲJ"⫌/GZ=[EB7&[NpOO{*\GYN"oIYWia/CTNp*d䣍.5Yֈ>-F¥9!/J!0T* yӹ}mF= lu(QƒA¶8f]FcmZ<٣V!;,&ZB|PS޶69Z3XS5(Z4yrR6 ~vrUgAC7"C) -ۆ^Y ö.#+p&3p0>1qKոmedcNfmb#k2/޼[Y<=$YM rŹg61nClZ >_vF9a"Tjf `#bl1M47b׉ބHվ S! - cDMu󤳮7r Y fҧHftXDp kٓ޵bg @Lp sC-K=Ddkns5+,HK A+'#mnin>"ta~h t=JFNp[PoxTd\\7ٖ3{p|rirD '3TܡssҦiR\#it/HZ۲$?{+J8:*HQ$2p<)LYh'/gɪ̬-ixPL&"!qACSBFe!bNdn5=} A~Lxtg$M/ܖ=!ͥ6J+TRP@=S(Qa=W4'[λlh6yI sVu4){=מ\wb5&BfMо]VNR;]{vnwo՜:'N]a䩷ij/8vvk{7GrFu˹ץ5]x-Ri{5Bff1s9z^owgWw`nc ?77Ed 0/y `E^X*XeH =l}+}[_ז-YR.YuU<@%-:TZwN?N?91Fh#ۤ؇YS1J(2gqQjs TnK0U{vZSyy^k̻WWvp25T~yNI&jIcνX>:\HF!X2R8Ɋ?tٮ|k<ёW|lU2S\^޶^f`QLw+?{!2o {ٷ{X=߷pdx;xBgkRs Y$[1T[s$ʰ]ڲ&7W{Fwf"iƛ_7pirymGM~4my-Ev"%";p1܋y*-.3Xy8 TjtC3u`<}NRktWw7?S9Oxg䧷gˬt:gn/83U$K,+\[ !Po +xO9W7?7_pF`409I0~w(B/^z0/?հ|W'<8y^]A%;=Y?WdAY[\Yy|`nBPp92|ae9WS?̝#rc{~>Hz{l)hF-%Y4R}RFWtτxEhW1C3(K7KT J3<$Ȉ-+aQ]bX!/:b] ELß,i-Is;xcW]f!bO7>5\+z)D}yF_pЛԊ깍Ʀ0;&8w)sj!6f{;c/=DQ+[6 ,dH$m10v=jMr={ج͛AIL:;,7LA6NAeRL~2OL17oW#B\1vSC5Sz?>2>i jF`Q(ՈDpg !Ý7fQ0ḏ[9'ܚ.b=2SndC֩.7.X#߲$!fVK0aHI%kaz68FAM-B 85[×܀ec Y)V8~Yϙ)i,2g*Ί4Ct Bkֹ4@]43F:8xAO %A6e4$ +#! )hd\rHlpyqdc_s5[nQ{o)d4-'lߢ 4 $9eX)"2+M+dGlƍy9W|E/EPc Lp2* LipI5s៥`& DQcp%cE]WVExf'ӭ:AF'\>_4@ĐYD}evuUGGŨu]H1J4EzP9K!ߛg9[@jȃec=@Z :ی[8Z\?` 6g%8zȈ>ԈL&*} 6hn) 2H Pь)x(`*+r6<=^8n|h_^ӫl6p p9ka@$u4:\G`کc5p4 HmAz~cnoą0X=}1P-hV$7<ݔـCXFgR 1xӹyi۵w0:E5եLrR5:S5W1 p0 jʖ@ ƹlz`hZ5dcX#PhMc1n blb2GcAfިu=!^&u#H ;!G0(\b.Pq Q+xd4Ȩ5P$nx/  %.'͛0n G)Y֋tt䈱pp;rz5[Bj3l*6 4At K -|5:llN%`=Wb)F )4}`~p)$wXP'aZyY:SqAl3 ER+>0d @\qr NoHГࡕlJl٤ぬ3.H @ 7B:8Sr|\g$ޖ> V]!y.췝k]66\WݚsW>S:3'cT1W+(겱)x<%  !+T_1bu<2ĩ (m3TL쳙Aº%B@ wmK\I-9O ^z)Snzjڠ5gń# jt+k opRwb#S(*zi?};;"qA6tF tF ) !aJ?X[,cd:{ebKcAgtjc p g}[YjzBts&>8;pzR\s3g $15y8tRT%B.\3y{a$ V6h$L,9(5H "3YǴAslwpUI&2~\~zaL^48|S-93!$LDQj&bބ sS2;" f MR u,s+_  ڪ$7ax в#{ \4B/^#&SB";rq4ot=9x% n(wl22z6^gǯTyQ騶Cp%ֺ7_y:/A;yU|{sqk]y?v)5Pnw]5CMJӦ`92ga|F&,}͎/T}P S_  *B\ᅰBZ_{iMͱuzb୎~:~:~:~:~:~:~:~:~:~:~:~:Q Y?g'}w^+]okbwuMH<Ė}KY4=;=/1φ.@1U=~S16e͕1z`KWG'}a!xL8girfJkUhkr bȲ\cY6p"-1ښhkOs>)q6I.[{Q~i=v`
vkܝ: i^:o/8!pS8;^ nqBׄIpZ n߻4= y~pflZ 3V{ 4&bI RK]m2h̞w;P19447k$ĬFaKRU(DdGadp& h؊ډ.I}(@טRrrTe4O)m4uvqqy?/:<-o7]Ǫ raɤ Q{yY+H|{tJ18-NA(2Km"ɢԉ|1h.]yGS&aggZr  ^g5> ]Eiy,n=R+r*뇩_BZ J9|JY3rh!=5oMu(!o&h/gA4k_!;asC6M+vp~<(.*c?ǐ>bĚy5[7DȮqEq?9]|OK899h87 ?,=9ۦp<_g?G$%fzc\xY9O[GL}d_??bܴ4p8Wdy]Zh٧CRet(Efq:PnH(?qws7epƐأ& k\,R|XsQOt4 1ZP,{X}s՝9,J"G.1tEAǘ͆iɂ|U5 n۠kugl0 ./0nB'< zҡ4ѓd FU .﷌/A/W~5ּEqIaK qi?r|sv6GaŗҦ0i"q7GhRut1)ks(/_v_7alh9o;a;xvy]j̛>ю-)m;NfUrVk ) D^`&1`n!T/ >Nh]P-Ls#IrLhLg#mCHso1g]LNnm{빀; 4 9$-H4zVh cg7xp2kmv!nT#kfŴE Lmw@K)vFfUr""!rYQsg&4(ɠgT(%*.8*Nqi;U 5򕥫N7j)x^h6m-ij&{e_$K\s/;TQZz0HR%ŵ=R뀱IʚsWdΠjE-Bq3ѣ@uny:~|5FS Fd*Yj0>Rj[m53R*i.xF8t/B_^Zzs-˓bt=^jl"_tؕJvtɆ3qϴ>]':pAI2$\eTU F< y>:L^X[A.aL,;558y=l6T˾X4=( ̂y!+Ӫ EsDYa\@*1!CYa@ Cj 2A( #꼑F:O췥' /[`pFV4f:mm)TɕOm͊]d[擮 ;Pvz{ YR93 "2ܝ uVΓ|#:l|tҖʹ K4#>=Tخwq2ss[jaL'Cdʫyurwۡf+n v{vuYibOmrowBh K`i鄙Yu^cGrk6\59rw$XL1)~#:z3zϓC%Ogh<ɵ!DѦHiSʮrTQ2Af]v"AT`]5hCPGݫ̎umHIDDJt'&ӕ>hI &)_+5x39RAgbo!ix4rG8QH;%XgN ʍ0*F 8xqeQ#,]m볈x!tCyF~iN"7i!V+dh.gF3c0b)oLl֯t8bғ3/ ?P:uR x!:c P5%'![0T$^2l$$+fJHֹ "!Z %x4W4>h-˿FqcT0dՔ9".>[hUaJc`#2%(\*Goe+q")o*HO|q"p>guUm9aeCeܺ©ۻ&((N:Qͳ$| S,Dvno-ʆ\< ç7,F[A&oZ[@͊B@RxU2ɭxpAeFH/%mRC1F")Cr6!>Lk4Q59tNҥIGw4uqR9 Rt𪉞zQ3!2Bdd2ܽͣd!q a6;Ƙ\.ä06W=;-x&q-H`Q5'!Unpk/U]tvb]p1"P&V)U-˜\5ss:i>[R}M|= eр.ꔃq7 'AjJHh]dNspAdWYj/l% %28]LH69tS< ']k~Gߞ]sm廉 WV=̉z`W&2m(x;*R thyIz<9(zUc= %)Bz!VqVaÈ;ȓ `/[!<T/='c]@;֙OH14&: tT^TFBJqiTkV_ʵ.Ua52u ճ%( EL+e&pHp䜔Pٻ6r$W cY `W[-)l'9bKdG-wˎsmK|bUJUǮzgNjRC ?fnQ|!Nep7,#.>b%/|r°Pbf>+L \JxY߫SFF_{]Aj5hKGi2~l>HiN%\T*FaR:t9jL% mOO:]al"24 q-d%f`UJk2p Gg="py.[1ୃZ'0Nzuj{ۡZXeͻYTO_8:'ΛNY ?Vbc9]32x^E{8IH76z 0{Z˺7h形z㖇kQZqX] .xtV%d %2q.$"Y˝~/^0-'&S!BVуRX{ĔJJī^NZS[whWT֨m(dVA0z=eA0zܙB/8ysȋSoðǓe;M't ߦYyn_C[רzaHEH::gUҿ@>@/%ĈKL>+p׵l JQpsr2sDAَ.T(M5%{SDi  t-#2d5|W|<G-Pl8,yj7$c/x2Hh$\6qױUI$'P+ ٬J!Hnp{L./uH=Ƚٰ{Xy8:cq$6H'ձ[QZGsF!lk`ǹ6ZrP3 cH&xgԩdڈEb4"52rEIfQ;t.H&d ]Zvv;KFC@Wy:\E[Tˆj{.JE S45R!%3 ':BOy'|Uotφ|G/x?Gttrpts^! Oqp`GwoeQRRE$Q|&\yFSPX0pN2X-5q]7j~ykQ2I'Rݮɐ$0PE0}d_M"'=l#sG_LR*B%$ "Ԗ.yȋJ{bAV5@,|Mf;Zn&MV rʙr* 2f2p7V<}~;+btgե=-yZ8{]tX?'|1e!թ 9^f\Bkq~T&W {94o^N>??o[c9xw?hΏ ٸ0N7#!`Aכ8sG_H+fݿAcMB&Y/lsy7mp`aj+z1G)=Z.Wj#*JVrH^2ύbXBVΣ8ZcWtyC3#틞]] &$gEGu!C {2R)1cPD`d:$k ;;n֠i:Qebsz>i!7zm4~h|kdz9ټLm?v2\1ZW v5$_5z:1"*NP=ZhK tg5۫n0sxVͳ-[munPur[h]1cR?Ο4T08+jG2<>5pQ/  g9 IkjPɨ{5?QYwYwnMul])Q䜋tYS)cmE't1imA/Sr8tQ2rrVE# ]3J M $R.E"N@NGfrTw'U*>V+p[:mm =[qo h ye܊u2VUo+>fY7e4I29"FƋMr3Up֤nH!b+nvt`+&MK0ҫ,1b!|KU$.v9[_F`ql$[x47z& 鈜'5+ݳ8P4ٕ0o%yF틼 .oG^ŬH,(LuhbgÒxW'AL|@sY QfG96yD6wБe0OO:V#\~o&a飞c!,.|>XiŐȼ:EfЩUa dTNIcjN~[E.U֊kg܋@*ұ^JVtR֪V:EHLju=[^cLb6K!zzXlɧwR0d. mFJ:1W6.$Da>9%qmV1[C-ݒȪC!oaF/Z@V1G!pRŤ!1ųiTNed{9a>P'V#h0,<Tem$@9 .ϚiX|a ҃E;8M\r6pذ&@ 2jѾ4 A;ɚRJk3w})%\֜B)Kt;: L/uii/_lvPưO.Zx4,h-rnfEvPx8mM3~vV7?G)' K >, Xo?wmSE(R[ mV =/|3;iɒ eeC[I,nNpk3Eb\*H%_\\oqls̳eS 2j$0k|)p)u+6;8;}3 c(˱cz9W >b"}=34lc4dOd%RTE$@[`efdQ55r묙+Mԑ|pSdy7z3b!b$RpW|G-5\Jf稜YQ>) vvcxd"QfXHbk)m6MZa_r2a/,nIc#'L)0 l[h-m4'RG 9 uwwᒀl-S_m$Nj[Ąܼ!FڔҦHt8I+ꨠ̪o\l8<_CDDsQ լxxc4?jƉsi4fѭE{N莙[K$% *@KStjNe7"lVF)Ac. 
wvKJ{ 0::Y~khg5'h.6PEDid J;VQk~8eA hc,q 'rRFC͒n[,ďaZ/M o$k:lKdӜ1P  B*u1'6, c.4 *kx})-tFgIS $N T7a"j4OC0940Z!IJ'cqUiЂI>HLɁ#BrN hEBfMF2Lo=( Z"|M#A{gi(Ղ!+^68kf(*iljK3.hgqJf3[vm,aq\h">'AKP\(3(4[[="yc"%H%h8U^FplW;hx(Jb(C>1ddhe2ԕ0]Rev2;/A qek{vn嗳4vnQazrw H-WOF;(?Mz]u*/EAwXK`u{h-/xiiMpW{7׏MVؠR5GU a~^}})Ss~ɽ4";'z=EM"zww['6?z>o֚e$ͥdzfiYX8us[ANY)cՀ Ia꼬\&jޞa a+uЛ*'!JK , &X-ޠ*էԯx^q~BS]TUU^\xx~C?O-U˳1]nIt7 NVFӉj>߶vh?A[A@y5% Vrwbj<.ׯP/bVKqG5rqo|7Dգ(wQ}j/?P5–`KWuKf{ П5.c.)򟟏 _ʤ4KԘHvلi#upD}о"{o|yaeVTSJLAR+`f0$n JqJCNyD,e5b.s#U49dJ$IGBl*dkQKdFT$Uʚ=P D̓SɕsLJI$UQgjHs%8˻F͂ZID[ݟN욿\;}L@@l_|>6tn4yE7(9BlG0Pe f&'%RB6 XWsv9t=Vi!6VWaUQa(CHŖsE5]uq"Ljcl1M&s8w "X&vRf9XY)EJL56ARQK$d]3M#vNS4h)+)Ak`zo040e`/ڤcSQ<[})QI I9htVb0!la 5]HYi^aUS(a)8H: R?()oʷ&!A v5&ڒch cl"Ǐ~њbk˞ǖf&\b2ÝqY0<| \옕 !eyH\M2-Es *+8FaX]2 겏ЄBfg J6i:m6o1`Ƣj{_leeyݺ扜So9@Zʓ~D6Y,<'M`)uƉxi)!(%Z;悏܂,IT X!RR ԙy ڈ!~HNƖ&vUbn@a|O|;9`ZR?C07(/8eU9;ϊ{!1aDQka dbL8Duh m9u23Ld%Y=HB9"v/L c) Jv{];6b?AD=m^Q1RCSQN(i%e cQ%Q)iIAY 'm@}TF0K\FH e(6.uiU8a圙=Xh)o ɚO/s5 4/h_:ИXqن(2VU.{0Y\aeO:ڬBIeP|%hF8(hςOe;m&,gFB P4\9ch<h\,qOV/<\k? z9/毺._fQ,Q'?~mlSP Z 1D&Q Jv4\J+t8]L*l)hs@i5EP c,ܙfohHYuNM띎?(]<H#)3B9d"9 {kV/eŋ=G*"jQX, hH Jj0ElE=GFVmu*GSxR>Ө\`]JxVf51IǍe[aSt3q;}C9ÕUSQFf sb%Rh;F-|6j@}#["mduTPթӰx &-8|oڹwM7#ڥCMpH`K>yωA!~leKzh{F$F$i]-bubEpjH:q3 !lf\fʳ"Z9rf4 Nwy;D!ZeWԱʁXŀb*56)pku]t=v F&IoL X`W.&̪̊Ѕt~EUkɰsX.;I^?Į?JϪzA&~'ջ>#);΋Ww;!$}|_My/^Z;gOHbB-nϦG/Qͽ> }?t||"pD) ] :{ ϔv(SϦw=i_1\Jϳ2"`P=}b{?}0V.t[&֠~V4:?0zc(e ^wgg" Nh"ߴ$p:/ ? ۊR#!ezb^y%NY:/ =X8RɫXB G?߅1 Ra$ Q4cj#$42'2ZA@.@&%rg1T$Sܶc$ J0fI|eeυbLYjL Dd8rI%蝞Is |!MNūrU?}tHbt殽'i=\ǹgސksmgm:9Hxl6k=7Ҹ%|+QJek?> kȥ[k1SUq |S_׌Vgn kŒy _Cw-vQ_׫1;7꫙6;Z xxrM)jbrS޵6ؿ3(DJ" c/`]hYIwԫR?.'G):eS/?>Zhr-yq3ZQ0M3**Uv1xDNBskK*[8Vˬ{{8(2#y'b N[>U j%.Kl;-\r9ۊrՁB"+5ŮAaD28;Mnmx(%l5E+wma]fPruFhJ?>a]V | ryzٿot-r6QHmi Vqej/]kmXG:[*.$5~ ^CWoىCV{@?1^4AޗI;tˌ2wͱ0k!pP}&ɀw'3v=Bw`c lxF3n l@0Ϸ ~[{/|RC䇵b$!";!㈀R`la8VK1X-aJ%)?t xibp>Cńo{om2DX9TKVh#hr?m*Xa}D\Z=ˋn:krsڗm# Z-?{K$^`nKo%8tTS4+X011M\t\m SdYGRY@h| NI,0ltl?6wz~Ճ9Ž0вoT+UڭlqN_٤/drUR\܊urt"DF/zq9Q_MN? S]D*'n 5ڱ1x)vI/_:;X35yr$6κXk!ĎS5٘=7g˲l)彥|=ӷMdB$ 졖fcJ5"zϖ`Eb  c豧 ӳq{n´Ү&[T۞ oG'$z ΍h`z- S!m2 `S2sZ,3ib*vkѥ8`-̜g[ceF'e-;ERvHU5r|أ7Qjo.` 8$IK.65NEiElEuxGXk5wxU@t!~ ?ZeI BzVvU'PPKjmz,fs`w]%Þ'1Ȁ)taZ;3b=a ew^ǷLy瀔oi2jեoDq(HʅcjAab[0tsj IL<\ZDBj8NrnjIM 8VT̩)K"H:F Uh%G=UZJl:K$Wɒm1xْiesFVvtՓh1%ԄfHU~0ҙ{g~CqOfxfǝ $0Xsȉ6gK# NPwa@hd=;<%:ezy^O@XJHDX^[JUElXxTB7S%]-S Az?t-R9ʭwNLNEg(-@@ f1S1}.g[Ŗ62Rk̈++\?r'gSݧ.psrl?HEt|~iw@2`y7P8ǘs`!Rq:G3[5e [&@#~6x=nD@A@^ ū9?ykfrRɪHShuEt#a@߁{ַ{b{ZNfGڭ[Y:=;SذϦ|%jlńhz !lxuН%rj1}!6ia ܐ֑n,(8jveR"~m{R؈t"-YHRa*VvXM-؄ Yzڟ[#bO&dښ]ֹPfa <)Yq+gC8Y ꤷo,7!E)pꚡԼ- Cf\3B4 ^pu_̅e.'æR1h*1 ։8"dk=Xpq (s:EQd[E8[a%ҩĩ=(ݓ3gihZϛ}t~c=@׫~gXX ~>ۿ̜Gz `fԿI9~\\t[W 6 ˷9q<ێc zٱ4ȟ.gKqh6ڗGw$ndz2#I nzstw׵*jOmAu8 O%kI^GYQ'[ӓe`'ng&tq3kBrY-;_%}zοOo\sVnZ<@NB& Ůn.斯2[g煼(8<3XdPl!?*]^1޾,޽۴RdrzZ?Ma@Ǻ~Ak01c?̕Fxsr<Ţ9QmVlh)%,Ƽ s[Ɏ>1D#ăI)gD,$"0 BfJ)jUVPHJVq `u %LAZ~_o@)6'pwx1y֊m #3c!M\,`ÖGl2P-bwx Wئ^O#88  HH9>/C$*@VYE5ŮA%ccF٭;#;2"aG6F qqǴ#3= <O܆\|*r ח=*<~=_]IRR`V TMmØJid"'>Uuxgg#L^yfhɆ&HA樹% CZQ1d Fz<[N=7g(ɈV=V-& DhZudG4>-dB#¤{-5uN蒉1 9eveFA>!Ækq{Jncu{➲#α= >Ԅ1A)KRfIa+:PH.4@GsB3-5DYEW'-C6=S*I 5ve>gPGOٜqq7?7(ew὏_vunS3R ybTS\͖FԱ o_7/J#b)P>1jWH@QNY\\mْ |B(Z'K%Bi(/9bM$@!K:3?XswWoSb}Pݓgb5?ν'>(c U{%\|x2:?t:Fҋ|yuBrF.7*T1ؒe djnV|IMh!\*70)cwhmȘ][BM=S||NVKke! Ĝ6b#i5W/?2o.%5gJx8_ 8ARV߇wll/1>%!l_Zu5)~c53꺾ꮮr&Xv:Ϡ <(ʕn]D;u Rdr,xdB;)o`\C)wҕ;#gT2QmR^ڤӞ"OiR$+i"Zev0$Ri &OSncWN;CKO<ӄev}n&'g.C$}pLv1Sv *q Ld0#;bO! 
G%4qOSp%BNMNjapI@ʕet'$$krlIh ߨT h⹐FEL~YKؼn-(yeq@ީʮGM؏5/&M̥=.{1M ~CݫW?.X}kWdQ# 9hZMq?ץjE(]~YH~{t?vxCb&v_F,cf_?Ml~~Qy65r I$I%Y1Ё,\.ais|%-*ɗ$cAga@:T}>X}B?>bUQx*=;˹.7t6) giVp|6h^ o#h=Xp'Hʓh? Ysv+$zi$܆ԫR iRi5A䮂K & Y3gz^!df1/xwѹ`[ jʢ:~FZ7_oQ.ˋnvwl)}T.G$p!|Of X5VK0냂M%,gA gʎYK*8q6ES*T Q8-6 !eZ?h4yJ2],e:ʫgtIjhS0TBp.N.%=ܝ@swϱ{-[+inֶ( I̠uCh tց6T:+>HEvUF.OGp<fDzj$esR{N\`ȘJ,}#YdV,8g׽}20uުc-'m@h3i7qz1x ̍]=QQzCZ B+Qzl.STWhOD`i-vm qA+gzj&mk(i)x9(- %NL.N#8P$q&m Ii.7%wfHu3{3Leth+3: 6EP꺍~Xjqq*i9G3?ttb;h0xq_⾘Ulok<nѠt z_6ߡ{7_ [K:uFdBh+erKiV‰EOA).Ud]<0+i(`=5ȽbN3rt:#{~i̋KyZ\*VY۳38kjf2 D&I gC).̓ڻyc/txGu$HgډXm8W*FRż-, cY]iZ^?:ޫ鉪3//A78BgsqY9$U>~mIQ3Rr V 9b"[!0)Ë`nq30Wu40=a_.@I1[WmNq{JjJ=_diq`3:씂.G#QNb59:&2\^Q'7 /F*ud}9M]هOtY 40eRO0"z`fB A hڨ2KQ'*/D?X*m{)?Ge2"f[Iu}@T8ѴJΕJ8յk.0_0 {kH*A}vSKC󘧢IAnD/ϕ.f@ZE+g꽨!}XZ U)ŠI#D=Ccq&Jt},S])־'.K(3Ha}էdCz{& ["Dkp7"|֮sUk6KK!g$x , )߻xh*}pC=rVVrӳR[XE 9`xAi˥щLU>O5x ^ + L}`MLK. z~Iy$*U#jUdj8WB˩I2D*Bsw^bpq8ks'D(Hĩ"˨yg]R%KDm\ޚB }2HMPr^Oetn7$ftzcDd5V3JRe @[J@PyKں@ثT]ybF)wø'$%7I׼ulc2㺝:frr$(P5g. F|7٧}WZWaDDm~Je?R+w3 !,ڜ\V57k}kM@X@- If)2)CS pjۯCU¦:EQ Z>;p)FgT1b%jk^8*9}qI*ZDUӽ'uzoM^FPAw⢙Z V>a9%|QZ Q|>*ho6"|ڲ0/Q>8eŃr!Qծxtp \S,%wNxZ$XH,jq %~2!DJTȅ$Ahisb&29f@0֊Tj\i b_%'5&u IF095Zg2S[kf6weZD "ZWdʤ.⮦aB#du3JSgP(C֪.{ۏ 1 Dgo}ZB{;I:LuPđhJe ki )>2U{:[4B%s)BdE"`PsٱGш:Y)*ܼaCǰ0 a"CUD q~vlW,eZ!”Efd\Z'6e #:&? mKpV5#JW~QtiZ~R) ǗifS97ūHtdOabHct:Wۆz|˾]lmmNPK~Gܰ fŪ9[ߊζR*8NY p!>8[ϛ O *`.;ٴ>m*† Fo:aG^d;{r0ޘx31].KQN(Ґ,\P: XMl)OD(Š3_fg~—d`bwIЊū0)ۃy0PsM" v]Y?7|#i @w7͋ӂi`s/fx:l2Λ._ּ&YJϣ}Uq_v_.k^ɋ͋׫cm vցBu_ "bxvv&í7^pywn>c0O9d(ƌN1kG[HؗPeC.DdغfWiyYkNiֈeMɍbW 3rd OW/ۻ|)I6m8ۺQH%^ft6Ǩp'S1_57._ Ƹ\Л/? ͢v; cxߐgہu}X]v5b}czʑCT"L;):"d^zD6xI }p@*3A6x(bZJgfWoY ,Mlp2;Ku.zOyIه+-$?'Hpk Eh:r[ k:U0'ڞp#+B!)>%DTvY{Py 4OKO+hΖHH0C {ՙKBXK"[{8R$J`Qb1TN[S\B&Wr0XGQA!§u?{Sv':qݾ2yѽ\uloe8njw佸|gq'1,[uKAYod-e[UT+ #qm}#.!ϝxc9 =$-MYdJT&R j/8#,j'K wU`iCcL-^H[ amI|H[R>\< 54f{s] Zj7C\t, W\F漂Oqp`e'd_ҵs]~qf {Mv}FC I ?E;cDIeR_G7+cV[sy}p -J,Ny؃L3.y3ȌLsL&x'ߧLJ]';Ijx UYU&8foM>bX答Y8ŃPkDmSZ*gx'=Zi' tWͥ29=f^e!m,()'5NRژn.2J[FBT>a*&# B_y*!%y4lDAi 5څW[`Fǟձu畔|V loaL2EU t"'cB XQ uh{@mxt6pM e#FR$ pJV^c uHD ^ãl_H>2kmq.®U ZJBnEngdIdF LQ hE ?0ڊErB5 MDP>KɘdȴS8YsP\AqL1#;kZwH ũZ>L{̧YxZs-СT+:6IۍSUkwt}/_S D~ӫU--^)0z&Y3OEo ?/lAW,, HIpalTX >0j6/n=Y1{D?yG$ymh)R67ዷi@ٵ$@:ZEâA(`Y3AEJAd[ŠQΣb뷺/<<Jjաur:Z,E˒!X74VKfIHP@+ځhd:e_8#SA& % ۵+[[GVܷ4mSu7Sil0f<&=Ct.q%bPUN|b(xd$Y6O4AAv@r\ 1HZMȦƓEq^eIFw24F>M5,x [mX,qb:25Eb nbVmnvވ6Xۃ޲bP5P8}e*G̈P dةzM(SС܄rk-QΡpa:4g%Y[|2M(;yq.:v'~lwwXĨVC>fhp:mjTe fyYgPG1Uf[N4+эIxO|]Czu`!Rn$,OVkwl[\@b&g^eJxgbhYi>EhNkB(M6"ZB8Y4‘-eq"KںqwUKjE43 l`4:Xcv[lh t<*N\Ѱ&Q;5‹oٴd }Wmju7v2[ 5Ěo-Bì[%}ҒC ]u,Ƿ1L0 -u8MĂɚ`}RI" KL"Ӓs:1aI{'_>v}'xk/?PbG L[6] Iz]$e؂伄H:$8KZIj*5Xː2mH\aG`CYwQlo`m8zyޯtϖ㤀I)ϧM0ke٥(!yDREڐX2O0>u \"NQͰD%|VՈBeWGQufAl9΢sG㥞Hbl529m V%6ЀJ X$dUG hBeKB{\(c1J"*cMPd(PՈozه%G(-QXY5+$7BX*DW+h>o n} A Hu"%V7*bp=\V"GĻ7Q,$T n=dg5ej>s(m9.R9GX$ʛ4}_8Ē Hl!EkmN.X:7*ثjGn,DD F3 \F4clh]qv43DTΕ! 
M2D&B#,X87݄iH0(T@>KcȌ71ﺧ?Rq!!dߦhdxkM,ނnou\$c&9J2D!CJO'RVXuҟƤpuD7_u]!VL/zsqϋ4wzWWGX:ĵ,}c]?ytFMc]P5 T]5 ߎcw+֞oF@7K;yý~\64tԍfՙsc8P }9~wʯ?!]zD%ҔR&+\6x2jLC|=`~z8ei#4P[3t@3FH:`)3\2xT82]vS;Yvp~x{աַ]w]ֹ89Z¾ Rq4~o&Y6\Uc:]żGLdKz<("%VzDvr>wc\RpP-atV4ζvboz6fϒJϓ*yk7V_ v~+2ORl)E ٧c{ZiL뱽RUj 9#2GD1It4J{م};EvϋvO1ro|Hmn@b5U|?M0},mZSʘJf:B8*9 R`ҫ(-1AC"A˜sh&֕ɔBR%^2&(J-噇s@E]sdh~ ˷O}$yV!hQeGXƀT0ӈ5Y!qPkDАNa8I98YЬYSjeJ eaQRS s1;Na6Ifp19E)kzEMN RBxXI$6'69^#d2h䴣9D%eO@F<&nt*5×D-Ww0̿{ rb?PT`.i҂[qֻS.؎Za2t,7rok{O!m8vbFf)C w[} 똃Ei֦KzhK|`%>Yk2l $lݜ*gΊsUF%Ap4X(g@C@23VdmA(lz]XqF Lޏ:<٠?U̿-|d׸[h7Aŗoch/z>gOiyDG͚=Td =[Sb0CdΊAU++:CPN"q, 7:oG8}tv&/f)SquZow޺&shHOo{:We3!%#Ү0NÌ'T.fG?]`rPk%3]AO'I%a\ؘ-:̆!̑|7YWT^oq5( 8LY̊2띶< 2fhp\ХXT1Lj:1xNAg-3FQ#`6CZ Ɋ4rVxS $gͼ&Ɍz8QIcHKָ㡒-KN\V=:BgqUkk,t+mϴf[2ϓ_qWCQZV?[bJJ㖬}&Ww6)UK4Iz2BbJ] N_.XN}ׯ.=II]o3x'y.o/˷WLxdUQ~SruP7x 痿,hFJۭ?ϞLқ%]lJ;gIN> 'v᭿"O]r@ _Q/"=;KVRje ٰ$V׿4@S"R C0x91,niU,M'-Euϗ=lI>H%+sLJZ.^%r ]D dyhpGL3tvV`8I2F:cB 0 Y1ʋT [η7!rK +,PBwt>2 7E66t c3璋^oSx-g 4Rm|PQZ.^LLKz]mVYza5%rh9j 2ebKR+&duP+K ?DBѓ!"3Ol00V'<=^%q:(42ͧ(CUa^/x)xZSԘn~ ̈1"Ism0?D4w`)2/0YAPt({+1LCG̺)Mh:hg+5۩vxzg״5;'KT;ǻixLrt4%#a:T%,8#̤czȑ_30x /;g;mpo;-JՒ[VےAزjW,VEP2%VI:unRչIֹE5H:V2'ѹIj\`)mt7hEmCJm&kc"{=ﭵ?6y@/_ HG*9Ӕ4W.Co|{Ω,~24n/wP/boE)^GFhE?=[9 HSd$&N pN hEOkZymLmh2^9>m $٬SpUsy˜T?F)[g+/@EجWgf3 BR.rs+1o|%Ɔ* Q+iJchcZlB:&06dm$Tز+2gv S|h / ><*Dc1wdm(sd7^V1;xO}# _"}K+Ζ%cރ0+$A(Q)3> SRNfg nft>gt 7gB |D {ARǕ=ɂ`˱6K92sd|;*/)isAx#ҡ,\]zCjGP)F51&0hu$4Hs//y@Uz˼ =;/S5/uB:8W!D "eKeǣ2:jZ]jUm;8L۪#VФ( QJiN$EwD+(9:6]&g²38ZI\  42/pҽM @FbdDXe fKJJu%}?c)і!!%oViE5լg*ZA6cd6K*0:}Ԁ(GW })Z y~_fKyFL|?Mx4vn+2KfaOV(&MBE@Q+J=2xίѩ^ ͗ &}#4=AQ@@ 8<HX:U%˷7ד'r!(ytYN Lc3DouJ>vWY R9k f 蘍@Sbr S "p:=:=( ! gN2w'*(*PNR ;BPfd[+v*7.64!"1պ#h9K>~~J8-. !$' 7.japdg IoC[#MU4$ȓ.?QȖv*D#KCJ-٠j/ ;}M ky@߈6 i /~P H#(Q[ 9- :/N5 ^/{@BPMθ\QTb1  !eyH%9rhkqZ5HG77˾5`)7EHg(*՚x'2bZL<zy'\M (!HL1O:|yS.If]PLPz:&:=QfW*L"Igd: TDrXBRk%1}G=Lt#=vG7g}-u1)C6yvj[S ծE: 'T˗lb6gñ9w `T)>vt!+ϑۭJjUMqZM ztA Cֺ jTI'KQVWwq{.;Hi ڱzkW?;GP}.сqҡÈ$2- FՒd !Lc>,7q7m^#5H<ˇ܀F®6񝰑/t{w/w+QBy}#Nws)2E!HL% FMh=+3:Kӓǀ|8UV:}Nނ]YkM !GR)#%3[>MdD!K­, *[CJf@ĂФ|HSBœX0[/)1(I>]+}ܨيS:)B.ј':g2H~I6[ r،yJ9/:puq>{S !F7`쯯Ո]o>{vԡX_ to;/P$7J&W#SFLF\L@|iUZs\Ĕݗ 95B=;? \6 ù+:gܰ31YwMeE<<6 W 癬ݟ v2a5eukl^U5Ňz sJ0ʮ- {\VMޘKVX#?+PoNDC[2$H G֌߾L>.ziP_\woI aSkqPCͭh9 pJ< A2/=WVӝlt,[ Ī-;pZaU+Mpڃp0ĭۛL1[?):=: e8WJC&V0O4.ATVKҐ;o@S#p*۔ SuP>2;Uܽ'A<"){ :{;`jgYS4]I{\cPS.ӨU'xl_c~B\iC'=$Azk>;W񿻥fb| WI !Wl4ДWbZ_z(*,gms+{C a jh{#N(1Wxa}H^ DtDރ)Y9{R` DYX̓ъgxa9mO _u@ UNreͩx% cE:Y#㔎p4/IhK_=iUN;Jǘrɡ|{ܣ "GF's2c+ tj: TU~%1vݥ o0-yρXhxs@U#2ZKkGHXǻfnhv9vWij@YsL$A8'ǐ\2xt֓y[>D#0 k5j1}j )̠=gq.+5 \z4BX)_puq~=un}\| @jDl{IL7$4=Gq}lZP.NUUmeRx/װs XjF_Sky-SqӛmO,ݰYh핖O/ЃͧWPc6)dt&?mgƗ;{iYߞtb)KHqNŪE4kO*:./]o?jts}Uak4/=6|? xt[WOzvq8}S-%G?Q3_?(2̦O񿍘~KiaX&Λ6VrKxqW/Z8!^˙/{6c~i`8LDI>qN`$˔MI3g3AOIXE~U^vyGv-F\P&&̽D-/h=d-!#ɲ0 ʲ#XܙyI@ă_.?n=8Xqb<)Z:}M p \IbF\S,18\zG$aSO5N0`z g}8Lm9D?'Vm" p/@n ]ߐ~"qk1Z>^Ggcvh oAsl9%Ct c‰0y) 61ds @^;Kqi,gDci/ʘHcB@}/ڵ]nq$6^rؐq$(%c~5cKbf' JD5d> a&fz|= sl=s2SVU1a$A$"ȍZģ á;؅t#vicӳx{QL+ǐޣU+J6Y1,W|-TDcn€$xdZFךM7ō^<9E**ѹČ:F$7ɠDV R#H%PE$-I#ToV!}Dڬ(g{aH%5TX{AcEkb͖|7𴐖SUoY!c@Lr~t׊Z;ϻͨiuJc]N҃r5G an1'Ɉ(/: iuL;U$\#O[u_םoޕyc3)w[Fih69zJ}}qx O8&9i莬vӨlY8^]?a|^>dkE`dG(ɛ^z(o^GS٤sggv!N[Q߃dfzfK; #*HDpt(G0v`~;FOiFPהiʧLi*>=sth|_Q!&Az@N E Ǒ[6+75@P#vF&ob͓.8y64Z1aF' B:*L⢥Ex=Ϧ-!wKe+`.yQVIlZE}VjXǓKK+wxd>}㨶'q)VmN*WvavJ4< {io.~_HOn5=]Dreg*d%c>i.&ҽ=E%4kt2#CTVi[yT֕P*ʍIUuPw5.={KiIC(|-b&]| Mdv@/7Drw ŞB:"(no0dP51tjkU&Ql_Qt2鐆DZIE0}ty Cw˹B2B_\ay&ðI+oغ'yg:91y]?6UDT;}^@{Qקu8;\xvm);\Nn( }%}wEe>nϙ.|FM)(. 
(aDi@ϖ*~;<.RMgM+kHEj^-2 `z;v>z-Eu7I3ov;r [ 87)+tt?O#X6o$ο=9fY1hz^5 ?g"0OMzҩsO( IZQkL N:j3G$KQmIA]?J(@ Z^XPCW Ёԛ;L$)Uv,+e ae Z~nHX ꀽIr"5Z}%w0NQx.]U'˭}ZI`J°:ς+b;.齭a$G*+q)lA8|e琖Yccy0|1LV/" /7<%Xb_="ÅAfHӐxÕNiA+t(ΖVJ&~k9nF:^KFR]R[T@ g0k@\H\ [J쬜 m9X}p1!Ys, @b+-0[z9& $NExTl{Ө $J$V=@TYa*jIP+1}s0_*N߄ J1L H)+g}.;aj  dOf06Y8\0v3uOU"a5*)I2#(yUU; iI$J=7p=sYTo.s$fF4s_jK+]I%y+DŽ)fDLn0)TJ0̅ Qg(9K#2%AYsX@X*vt+˸癆giؾ;?"B KUPVBSt&RDl(*"_lI*Y?o!bp֊Gc$Ad e4(BZf%CzdtGhz~}r9gƆj?JT$ևg|ic'[7XHqQ|OXFE / ؝WG5/+Jd8ZӦh `Ot NL^2Ta OW7e,\WG;|"< lC-GKg`F4/A-\bvh6 Sy%e>oZe.g CՔvnaI/-x?V향iTT@ )ÄVE+BZX9i"]M8v0<˜WhAr1D**;KzؔшeZT{)N)ٷć?=hX kZȈZSO՗cONRrLs򰾯`ATKynq03xYPz9QE ".r&/r0P!( >1.um."ԓ` ] -'7H]JDW[݀9D= 44*eqJh ^j 38M)bFj^Tk3!bIs~=Ɉu C!@uBGўQߏWM^8A_}l ׳uTpc]]XFͮmgq\x=E-ܭo$`(^U 74BnGL>kܔ+ĺZln޾%T1nahܴ{{,w2*w8pY6;^np w3]mnɦ(/MV}ܷ7\`˫FR)n - ]G+)QVanJVT x_CuNwe|;qw]д;=kovE h& \@>]Ml^Fil'[4$ę!% lV y<9NfEg N)"D&xQp$Έg4/8TYTSj&'q ֏"DZy+2q+N +5OdEDD~=ĢǢآvO.] VZUJZH B Sg sωU2'~J*4ORR,="(ZPoʿ{p\.tOǯn1$NhŅL H,r6G:*PbC|ވhn#7{Ѡ4eM#}n6>{Hyqσ9T.I.)C/ 'HI0UZT%Wk, 3ax;{uk`jUӤ*0C!f2=m`ifѷ4ee+48rrn-ANߡ#V4"^Dq8=G(hYNfHYiqY]X0d8 'L8A|0l~-0 V:41#C&5ԝ;\ӣ G+.)H08+8ǧC΄)e w9eݶ hڭ5X]v wZCѦI#/*{Аe6NWÆOᓢ2NV;׭cS2o6hzE{g!˫;CigQ$Ra3E>T`Y @U]˫iS:>; bu /E39G U{(}HN1JctBb|N/a'pqo]Oj>Ï<)nN 0YhЌL@ÜWaQCȥ!jg|%,-EuYT &5ߝ/G\7-Cާ(SCx4~b-0d-! [GŰ !%_!*95h܃\hs֡}sx!}ʶ'dS64)9Ձ.>%3iBX xXEh[ۿ W AJ,Fáԉ斻Xd]}W~}xf؉I|!g[*fR۔Ɗk]ff|'‚rJH%|q eڞ#"q(Nw)t)')",|ʹ{_,*XD)EbiĐЖ\,-0A-06!M߹.O?qSΤ*T؄;9(tz]@=7e OU3EE `.zǮnO`#A]IWxpTuM,}݀4CQ3͑ AdIE-0u9'B _]5aݡ2׵AKz\%]o7Ψ&.qM|\Mc^u@QzMt9bIcItۢ3R#v OSYg/;]8rm; ;8Rd`tlb>6_{;44O$E$΀ )ȫ#˷ɊցMMb*ߟTgsSo1N10OiG;һP|4g3K`e֙b$Ay$.2LCko cCչ2RYbdv]ws5 W ZXo #\4vۖ 8<ǜJUXXXJeJ:ZꪍN/#cHjVjta(?XUc1&DH09E>􎕮{96LxWtSw0 ~+LiR?fҳ*^V hz%v@C")DF֊S#'J;D2%~ŷVX_eˌChA_@ N'g<QCqdG9O3Ң}L3 & չB$3io5 IgSì1"΀rVXŊ.+Myv>&Tep[bq+0 <{y$!=!au)k kRSdZs5;ߕI)q @JNΈl<+.KH1&TvdL7#ꍏH[RT6kS46bgȑ,'7CI$; l)֢iȊTJl||/!NJz.qA\H.$FJj\YRthVċy0A4xUd* 0By$-I4ʗ24\R#= Z w_(lO1J).n.V,nqvuw[/=\iP#8간'("֮B2`C;}MO3Vsa (ؾïQe۝] #R͹9 {X`,{@ZMR9d8$uKɺ@ep:@(xx`ύ㺝K+ D= ap#S1$gQ&qr|q<]=BQj?A "#>7l`6IqUn=gJ$0.A]q_9 :fqvqYArup1|Lq G UR ^_q꠩{C=%| Dڈ^h;}cԽg4wI hy_X}?s%.'zPmN8<mQDIV&zㆽ{5.{m -yMHVc;H(hoŽ>u) ..u1jQ W-d 7$yl^aeS{q +$^2.XSIioMoLJꍳ&v"g~{ k4E!yѭR%D_m[]>B[ńk=$Ւ2' )Hss&.8qE3߲xY|m?˿N$կ0d6Y{Y<{NXxI> Zwtsep:Sl4}{g3N~}͟g&h\IYH 0Wq]Tl`PW?A=>hOU+/sMpx~EͻMxsZ1ݼr3 Y<6Irj>|yT{0EϨ2$7oSm4U6K\~/KX,1&}Z2:_p*Ks|1)* wRT+20&˚ tUFbߪgΧWQtuD~_# 5Vտ;cGRH*RR&F~oaL?.sC);C/ #Y*cV(P W k)$U88γ"I!p ȗ%hxZƑ.&NbTF )\ElӸ [{VȢrK2ƍ4n=v;6jU_R"/"5#@; gY{ȑ+q_g~܇Ll^)%G؇GVb[H InX7|@ vhy~z{` UUkJEF*u%D9FoW5R5[JC |qq[۹دw ] X |;x(ƒ w|w GOy .CMb!qTP' !~gɡD@ q$!?+ yL`9U6@Q57Wfh~+OgNlϞzWXA^p*^*tJ-{˾hcpV1ޫtŔ^_HYOPX+^6ɬa=]P`uK0={Iɤ; t$I<-ZbU̕jqV⣗7GUoMS(R ܪsKѱAK*)hnr FE(\rPlzLF98fE;vo}>YtbӚ%dGE5^N-.Lrg3R5Cо`8Н.IXZ, Ť9S4 )g N< b\YFWm^S.KQf Z P/1P;@n2R6d>RT؀hwV(!=ea80eH@JcIit/92u%܄ dR H 0۸tswĥ|V1^LiOFKU,MW43q ݭJ,1HC^AH8J68Vwiƭrq>3֟[}xJxYZ1>M4_.6x/uX 3PB7"{):'`.d5W^.#Q!YKB~ Jyɱ/O-:cb3)sڝ>FdAAgU v՝ 9 ܮaVc޾E/Oѫkb݌%RH3ѥ8gwĘ8z|gn6UW#`pbl_FyU;yކe5y0rR*m(,PC9/F$$0ɜ2_-]DR Hz^‰`8Hv]:d8at:7PPj9˅ 0߯lfheR*VfT Z_o#e*N./*|sHo.MftN^FQ[ :WOg;N~I&n 5=(hAJi<ŽhfCBW/ё|Ó)`@ftuű1K ű@,U 86ٝU\sb~˺1@9*n8?潡8_")D'|(ίcdtKmyedqrM¾`_wa_6, >GH; tQ(˲uM7MڳĿG}1yBo PI&'7[hH2`Dvv^#Qc14(Q ~ Հ5)?T=2ͦە&s6j{mH#u`v!.dc atN s0J`um##[V BIcv=" L K 9é%=:Ӵ>XQuJ#Sw 'd~R74|6(d^a8Bw&Q9"Ҏ C<̑4XPǀ7~/yy4Mr"5LX-dup^M`,JML56LkVEgUښ<2'd2`t@h-2"s U/+;6SPڶ~6%|-W[GuR~Rmc+f+\`B)E`XCC$= 0?c#_fVەg^~>(w|o kUHb( ڌ@A>c5(wmrpPJ]Q3~LgI^f ]Fܖk I ! 
ə*Xv!eP7~9yxfW,ebc%R “*1Mt閩om;fSz*H;ڰ}7HK*|PO F"|fSH\ 5^_cP%~,oa 4Ii[>r8-]yrH "qک ݓ𕐏Yb !$lqJfڕǚviZB[1|w@qSf!iZ2":$@[TMt/ D Bh,J6u8VMti @mE #xg.Wzu&y_ q3zW1vV|U0U šG3;m>l2IOf.. ޝ^Njŭ]i/߯O'yG3ƽ~P4)@N!^S>snYMwR|jxg+?_?UlVz2 gnA=<ټr/w^}_  BS\}XٲvN\|X _=]TuF=LVc'~ ;#n*|_竊dޕ(G#~BS_l= vƗ{r^No _|?Av˱ƌ;Ed QN&&PM9ĸmI$2edBֲ§1\ _q^y7s췉c|"|&KwXsU~o}-fƳϸiׅguڼxxڝ5&PboБwhYEe~9/^`w3-3=+0/r0FO FO(g00}xS9k_erVw+OsJrbg~z/Te٭p#p>5lo GcBn9&˒eoթ!al5F{;pRה_C[q O9ϖ,g@:  `p !r9U PL P)@B,apQ祵Kȴ?xX9EV)e9f\,6ˍ$j V,ϐ6ƐHOstߝ5r9qU@yia2Ŭ&8-GS,tE L#IՎ3Y_>{!}v}#KW!*jUlmby3mqIr&m+ -3 ';R'GwsF%̍{## Kaq=b^kyyƝvWGP.1iI|<ܿ8֣g4j4r5}gT7t~bg0l_ӥCR~w1_aR 92+qA;wD.-t~; gwwαPr ڑO[J `^KCQ&^;C<ά - M1 @^Y?d㕝KyW*Δpz H;@\Lgy!Wbv1&QATl")bcO~I͆:AcԿTQZ>s>(5腪9"{#!DAC(gE+Yii'!-{"K Q艁16S}6UA`qt'4}QsU "@pI`O}GoPxc@41(1=i™Vau(bP LtÉ=Ʊ;M_ H0Ā8rq$*׺f< R3[}?NUr@jVpQ%$+ hм`@FfrR:v\ip;z. !gR Gs?a][Oɒ+'M"2>2:4* 4v`͸7dmCS@gŗ_De'@C%|f..{>b2m^2\ CJ k^W#CQ/p;F[X}vhsǧG5j6\nv"&l6i|7og#z;YnJ9ӽifXÿ(%97]`ʽIԞx1ysU^~߭<Ǖ9Ys5o$yo ɪ?/g(Uh}r7~Xs|Na{Vl4rfgc>?nP@Plm)8s-Od5e^DV|Ro欜@bm3d!ekbE krUN]eUx{3ܹPCP-S=ebWd?}@„YJK}ssr/KϪ7"! k v^ؠzoh#Ʈ0WNR&yzFL"-u©6NNJm>]޺~;ܘ|PD(3Itҫ.j8A2JgW}lQ9w)?Dߐt>x$RL󘕱b2+.Z9IFiJ "Ή!Nd빣 "Vڨo 0VqI~)Z|ğ3t=GF -LAgrǞv9H: TC&ٞdS0?8'S\d1 #RV>-6zen/.zv膀dAt-RRXsage׵׮:>]}!BW߂A]}'\xq mx:S*ĹrO]CʝDQðK H}|:׵q\% L'/V(ʋ簸K>~Sb}でk:[a}ޣבb{b۞:eNSq6/xddZfq&w2:ezg$euW6F{P͌eh(b SrEE]e_.?;m'D(Sm6XCf7mQZBQ=RfT{@,"-XUluDֺ]oĆh2NHZױo"*ƸmH%k(qrSUU4f"5O/OEm 3S %~7ٙF K4 =#g+ Z B mYqT! 4'%N5-A,,;8·%}_Pԫd + GVXEOkP@&9XK4ICEW[ Ϙ ¹6lASDm`[c%zbT@ek%HF1t BE*O'obۍ<)l2 XLTMJb!vt"Л;tvoFs!h6QRS`o 2ٱJ*<31ѪCTve 2qH|꽳z2SSz]b689;vG̪[%teggn&GzIoG{9Glٶ˕\"Daq \Հ,lt}~OOp#:H+=FsTۘ&IŊZZML)&rn:z㬕mKz|%fJ-J4zԥD#U蒦M{B[)Km dA5MZ & gI1 W=vNy9;]3&g!a稪(F\M+! Ӈ.A+m.Õ Q t*B?_LNj?~C>A%yYI44DPrp'bnlxENrEo*fheѱY#+%si5`k42x?Msxpϣ& s<%C9BMDŠ=f%]O䝰8/BF( -T"}4䠝i6wZ~ -n9i;zN}Pvx{ Y!jL.JgD)`RU |lRmY(!dhtR`PKɐJFRɐ5JHq*%u8`/k_q%Zx|O| NKW mXTEd8U# eEJICd3A0ztgoY11fW9$%Z}$iP2 -;\ et5x,!+3QP ЪOW=}2ouHdk6@]-‚ ) iVSX[ Z=HcVbL!t1THF'x5V&f*WWXk'TПy9PeU^P[p EHY4V,'^i (ݶ$ (5)Lf#6  2k"%Or"0=!wԊ7IL1p:Bcg@yv{תj!(#-rDŊX<ֲ6wLݾ줁**)C&غBJNenkREJΦ@}FCըU 9֝CJj{}c9o=[Dxj]SgR&# zzf]`1LIYT*DL킨E״ϓS@Y ѐ5} p;i'c%le ܗHO H$U _,ቻyas Q]v0k\FL(RR++ s j'Vd&dKQ)18 x $|AdS,H׉B% f5M65ZeCSgLL7ۭKmZgit=PC$aO)⾛]h÷߽|\C.<d`d2N+ t4DN $bB=ʜ02aXbJVD@X-2,WܫS =Fbc] 1gX3, lv?!;7;{ww~&AO/7}Zw!pKvRWRI* "* WE к9]h΃+Rg\Zb0=/w+T6(p|HI(ed1Z _[t5Twϟ7lf{?=u$J TQ(ѐJD.hsg"fRah2}Ց]fd#x49y c;xAWmO&vvb͖m-ʝSb6nO0hqq]#7TTŒ"$ N<#޺ nf_"6y<OO-qEMt4I,A;JBIڤp;,zKZc֐x@Q4"2ЧB!BFhisA%_.CPy w.p)"׉Z\Ԙ0<뽁ucFz\:yR&ܶ&L:(<+ p.]ZXtH"M",_;/E CF1.z˱x/PIMG) i (R49ǰni>Cj}Gك[/ Fr0QK?Iy)O?"u3Aa{Ň ,̀]6;9jˆ滹x?[j$SsѾ& sϩgMx<1mjܳ~oA).߯od-˝Q"b.^(&azTR 6 .E6X͹R:$: $] ʪ^lc8`l32r)#@HlIGJT9Hu;ˎgҳ%sdy=S<9?mho#/=l;E-i]+1 &BqG{&+B&C b $jlPWl`5.SSjJ]UD[<4x)PT<B (\ԍgKTLfkY{)t]S斿1dr>O}g=| /sljQ`T8vȮKo |zTƁT@,fVwսfmG8w;jVr;hSi`hf_?089cj:Cx^ .n-3_|}7Ke`EȜcl,'qR5t~2CSz8#\y_&{ \(;.'0B0o Y[!YѠΟǙiH5+lE:hW"5@ N m SpU_vA oE"m~#HDrSYTx!YRMp/"iGVI[,)Kc%,-.r[+9iB8"1S=@fV6O=mIo?':Ώj)SNy .i\%g+5QNpVi{Mo>r^07i%FPPIPBVFCфii h\eώ,hzP rxqki oԟOQêWLb@FCEov֋:Ff2;"SLX/ì FrS2`m"f>gYJjik,xI8es(la֝y7GkEۆ| (k $ &QqRbN5=Z]=񵥅Wh=ж~pv-cږ ktd/8-^iiQRVR'/;T%Fk80ɠShUV'-m"A뫙~ͼ̣ [|v|۝2uAfё;{*d2md:g|)逮9|@o%%Bd}4iE >eQA {Vt Us[SPZCΩˍ^fX}"1&LW\`Iypis6_'wAoya*l] AdBT$0kEN1ESq,ޖ1`Qxr:sgE,J%8? 
W.k2P93*Ib˙kPH]yp$ '\謹R܄ˬ&ļP%͍س񤹸*룚ӜV9biA.1_nx㤨&hU~rK:j6p*1?L?>9@ʂ0UFFKIFeo͇ia}Aʞu ^R-k-)O/p/~ʖݯH n/ǧI_jYxrU2Ni1Çlh3$Ihær׺9sUSeeeiݪ;;Gr56/x8g::k9 L"yümͦS+Zt\u:1>kMD|3jTﻡ>p0o SMj.*_׶Hi s@D469r#aG'X[ts>O:g{5io RۖBR(P ) 6ă $G2 +K0B#P&FS3yB1)yEaA[LZgKQu=|&gT@{s'!>(+d@Q 8-1:ꬱ)m ]#׹'|}N@R ɣEExmZl\`W4K I_>K'k׫TN;K(Ę?Œi)aFjs[In>bx0}N@:vGu$Aw~RltxRZĬAeMBdKJFIp '֏x |9;sAt<<Y.FS,0z?*쌢rh"B6A' G2::CcGxtd^Q[0ISXM(Evn2?xR.[%#p:AB5Nd DGN1.Q1Lf1j2R?O,d({'___ kT1sm'X8y@/zNm0ȩ  P`}! h $Ǖjv:5׹_aڻβ8=`7F'U5Ze…cH`O^ZJɑk/Q9`&Bѧ$إ;Ogw?w_Q%= 3B.W/!W7悘ۯCh4~?q[ׇ[?~q`OoX|ve@[siT:{Ӌp#?#P:t.tU hBZ"k'/6bi>|=gkoR s4g.ھIɘ{?˦o`-DQaq.=;>-85ܻ`)\*K jm iu2GB40[`[aj`duFcEf3}n%T|]`ώSb >=9Gd[amKtI5--){#;5_oW'Md•]lr  O߈aT(=YE6&Ұ0k/͊.4 `.IVmnq/6qiG73X2"BtA讣_Rߠ A!JaPsgopx\KIo?'ZˈW[u/Kâ!ԉ̲- Tk 84nޛGH(.B4O0m`P["q=<&YHG\O答 He6'n,)#hOtiSSoEzmaɌ1d)4~9o'6$/Y=}ЎfmSņ߀-;=#kK7z ~Qo,$c2a+ROo+yzJli9NJ=nv+i5@\dpgm|vTPuݲ m)CܵqmҶwmڹ6p>/2-cA`q7hW ?֍E<vȇy SPCx`2sAJV GvZԫ'F8̎XN5ULkū~xiŝ D68]ϸm˵KpHr˲og:IGs;f7dM@>V(.*]ܲYS)~)γ'UOXv>F$2ji6]mGp÷,["i6*|X^Sܡ,r.,ұ[`|z.ɶTZN6i}) 賣vbm]7I.{/R{.A Rr V'/.Hd4d-Ovl:XAƵ:⿤vAm> Zd7!`hQd _v`0ƀ[H;ίbݒRxr<IW}U::lT>_Y;@Ij CY=i/fږmT@;}Ǘ6jeO?r-7wv:JaE-({|Acp'`2*r@j;DnN ebT/=XV`4*bT.ԌSj# ^Y)WV#. :"G`& Ih. Is +i%? mQfil;Vnh% *uJjG feޯ0ժttQA3YŒ2"Cfq>V_ 9WCt^%TdHU-9o- +:H@95e25(YVFg5chG$k6gN&ZB; ܛ9 朕 k#l 8%1eZ Φ70lRXT@Z:;Eu"kJ `3xl5{Mܶk@+JU |ZOGa_;;dfk5' L^/7hZ |6lsϞ:C:mi4Ū Fv$hdig|Xछ̂v'3sz{rz!Gg:A$@TQ| A<$UD3KuD <u*N1^\.r_lss\LnR#bogr$%TšZwº!۹.xHV2.ZVEeB1١DX}YWXBMdM*۩5W*V7ysKopdtss:n%\Ԓ(Ȓ0JԚ@*kPՆd2@wڨuAR&Eso3JVsz# 5&VR|KSљ0q(Ci=ƥ?25Wm}UԊDPZEUQj0=[(5ͷrpF7TU*Ԃ+0$mf6$ ("]PLx]{IXMɧXǒ9N ~/!yo[U8f"1;j4( nzwúkwؠkHΠ{H\'-7M0TT (,5fl֘t#"]K"o( QAdӐA't,8Vy2<<ⴈU)W)PN0Gœ绦WY௵Ic{y%ۤjC c038,">^YUd)ot 2'6=$U;#1E¶Z# uLK("G TH04V5qǪip$)$U(hE";vnm-!Kޚbݨ6ҐVofC7 YC:KšҀV vBCa2YmuL'QdoEMqϻ.=et+HtՃ0$ek]/;}*dPMJrgz`}v7!Z~)g.C=" ztF~K7K5O(Z!fO0h̋-gtE'٫R(܍+9C=:v Rֲ~VGQ2' : P0,GSwdCLU`ouF5zL"?6C8Cqɼ#҂lbUh]T" B6|,#*^/8Hd3} @V\ #}r*=Ȇ\,4he  [$qQGybBR[|ӂd J81pcwɎmd|dC9 E&׀@,X.֧:ou)bq6c ;2[5Xc U۾?౾{f3#gBc.>lԑIQ+G>(TV6գX<~~꼑~QmDkJQK!GgRsVJ:cvEB\*l\2ٚj4ρc>{|T.`1gP*ɕưRpeP=Կf^PA3{-ƿym@^)@ ji5A ImP,"Vz3kE069#DX@L]rv) aR6)&hs0>D)BtjY ӀPIX_.2.Jb2jw0425g{%r9kMy6.Mv9HE!W^j`kD DI;i& _\lɃ+v֫Ы3.%V,f0|G?>G5B˞A\"k}d F*-xJ%D5#'=$$Q$S'g5}[IZBV]OĖ]Zsi{ wiͣF/~i44X#/G?SY]\ P+#92-U|ӧ"zh~e[VcAj{<.n~ma Tv?S- ef;Xy5luɚ%% e̵Eu\3`B5yke :>l=$ iJb]t5W6l>usbY9 jʬ bvԲ+?A\7ih+ʱJеr{}3|)#Oyye[\<~1yc]< ":Rʿ&)zʦRb  Q;^wA8r33<]jl^]ьG DetQ]l2c{N]1wLp#s=l7k|*yc\)#%O'8SlW+݊ydF"Die̓N(P _>W d M.r =Ni3GH|,~Dt4Oqa V0h^]|?5l8*דI~?n&NKs_y|4yL' _~uZ׼U}WWﮎ'f혪5lVZG@l\o/삳s\c갖n &) S|Ҍ |l&M{{/z{#zXX:?yk4&*< 1 v7~;;zRw6|(*(%ko4G'Oc:\KtWys@JzJ\ )G Ey0T=IIVENcp!+`MĢ!& Bn&^QR{7)k&y1/d=S>/0T*u\,zOC‘qι"D %^gJ$. E^?Mj0Z6{ \sk)83}2Cj a6!191>rig$@o`i?lVgy*| GV:҂%29n<ֈ|#KC G$}ﱮ ^tr31~dž~Z鹨uH7N\1ᣉhDfY8ŴQ@<@Qz#qwg!= y81\POF6DFj'X9g־#.RPzrLCO.pR1i/{ʱsi,(?# uзdYicӻұ ȶx㩚 H*fA:K?y*)9GHkL8S`I˺"oTokl(30/ ݋i\/- <xh}Zw*.1(Y(uER.gjIb9|^R=:AьF$ɇcᖲ{!f=m^!Zw沶٤d3F FGQK,z鋉Jߑդx[$j{We۰-P+bjH)iP*p$cUBJ8k&;1EBq G@%BD pYTe`$q&!m`B` d~1qOneLLȊ+9]׽;4MFu2qͭJ|"tJs85%텾jɸq51vFTdzbVojԊT-QmɕPލ)+=||9)a䈎 7JhXa7C٦9{*5Ӟ"=sԷ<_x2]VN̳]o |sL=4mDAi;EC\=UgW[u{QEZ70'UvJسf'45kr{Y˼ͮSWh^]:ecZsj6ރK;ކzm]Pf]Ӻթby}jdTJ<RY\NϯfbJvi9[8{6?NΘu}.2=zp7klTXßػM;A3 %> wtAzs}Q:{3?`~}*G?{5p`p7~om-cq7{}wKR"1HV0e,r+)s7|G:ܛ^}@oqB.x= q<_R]hh] ">[ 9#cdG/}e%8nhq~ɥyָ7\e~QW#(v;n mϱbM<ȍ"pD+$jeS Jouʑ?)*zj/2kZuNj._uy)iɲX!>*'$&2Y(&z-!ykBD  Ah&l Iru$dr' Ĺd ^C'4Z Ub%JS$%(^&D.h,.*徠U#YakRb㬈9$ /RHF`@MEΕ@& $ٯ õMRg& N/ Dߝۥ$6U @ "x[x"&"n%s6F=:ً*&Uff 2dtqɇ6**g1 JD%dMp R:$A?Cw._7ҍ_HA$@84!ϥ ^+F&#Nl`aJ_ u6[GE"r*! 
?e@h[d9pdϕi9t+g5*}A7$:KrU1q!}xqRr:HmF@n)F:)7wq럛(}w6dfӣAΉ}7j&q]CN!l;vB &6ChFC\$c5 p;Fr ` ؋ؽ @nm< J>RVIunK$js+R$&Sn|:C-;Ngґ})>׹Ӌ o,fGw?|imNVz/Z߭.&w-;\!0K=?Rث΋[V#\Vf^%dTVf`bwG 9F 9[6Dž;,r[k~YA_Vld lٽJwrc;iMK.԰قUlF 4{9k.`~-6T[˔kvx?pq3'p/%p.BGu'Q>Eٗ`Q( 䖭i=Gʌ+4&>$d\#B>8 9BqC;J-D;lAcCXq3{Iw0*vD9`t{1&$^q1 Yol^ g8νGo.*M;Nϯ"@CʡZc3"q>C7A9.`Ing ~2VyD lN_߃_l:dP)G ,AXآXiQh4BzĴX!cMo Wޡa,SR(6ZL.f {.sIB;8vBr¾ZW9'v$e/NrT9ıFɌ]#c!`iZ{qI k=jLRr 1R~t 9Ǧ(PP/815j?ĕ]]ko`SNf'ҭU)ukWX!^`J[ =;]}Y$S--eVo6 t!]ﶌCbT28VʤX-a92>G^v;Ygij"JnsEs\gD[[vy"EVg@Ovɤ"sVYhԹ riXRFO4vJ+y}V$1.@MY PDUZrr%'et֎u5`Kͥi[!S |6tXHIcJdpAdS.ukZȷvyyZ0wyZ=O;}>n~unSO47V=#,>h`%!dtzkަjQNEet ezղ4JhpOi@r̞|_I0;X HEZcy}nh֪Ou\-.5wgdV'!h**=BÈ0XIEEJ!(PPYoTg6f23&Ό0O{\Gޯ=w~ƵT{}emn:kJ=:^1ESg3 17IY-z/Q󟦌ƒj50ڜCVwIX3Ǡ7>q΍B,OpQWZO1P R6|U< N lY?fİ0 7Sj=/c׿k\0w0N%sCE$q4_?)Ҳf  (ȘR,΁t^F`*tgV%XsK\}LZ6SӨ*gۢ9OrVbL;"1O%7Q+d8Tť=iϗWZWk޻a0v~<~uH +4E۩7a5 PM =HN3F3f& h7hf}6oas'7\vD.N_rn[|`bU{4Fu= Kΐ@䦨\FQzl\㒘,EX&jVt*8\jťd{4,=z CuU;ru%l])"ZYվ@XЂy'Kd \ 58=xYrt;)y>b!Uv[oN|iyhba4DRX@VήO :ƴ#ڇʙB ! 2GZ!mѩMܒw+M90x u!h@X!m]':sNN8TzS+[&C$ΰK!0 :Z_. UvbFwG<^B|)1lG% 2H&'C9R{MmCS{tfy5now`MXӮ>yI*2V&y4twz!e3T0XB:ac_x@uAp\h4ls rD{/]f=X᝵y:/2vDC[ƻ]_iǏ"D%STzXlb2jCY[cR{6h;އF(Kz;[]Dѣcc?0ę;6]Xcl܇Q.<f CNO_~)fTS5L!ݫepwA$7t&HP{gC}ټXwԜXPZc3N>mɍ nlt8]3 smcb9eSu-=oY3`Rs h.Dw! 1rWSm`C)W hfxo:6)ra/L:}/f\lLN09 [hyMjU HpUf`Xm*Aif1 Dv[  zWw f JY) E~ $[@d7NsUZ5u醴bu @g]cv٧lke [R6ojdz[4sʎ8%w{{(|9[|*Ww-!}fPustv!#`"bcU3,L׋oW{&d/U/FQ#43rZز,U䩝޶Κ 33n$Z VrWduY?dbr"BvJ  {1|4w8$+#e/@']A:qaJ DZ%IkR"ES,A6^k1hzub")ۗ4cF H b 7DzkRJSzFR7>K.ӺD`u >DX5&>^\(v"k{ M(N;skz;׻I:="?({QvGQ6[tq;$n=g~`͜3Ȧ˗:ٝJ'WIF &{JF&֖`VrY7zh4եKSqEA\6(&n,l-` -)^m3Wf $hǷJx6RY';c3)poh; T+m !zu0v,1f_vuZ)J&)E(dwU;jMyh\+>v=| ެρ_ qu@ +ӻߏ/Hވ4ɏ|&FSֽΜLvp dOу-lbN["zDt"" 'Dpx҉<%% 9QoZ~2&Z!}ӀU0{;||.Nv?A)`P-F@`mH5l nJC ġ`.&)-ӎS֗Vqgמ9E\sľ2)l+= HJqV&DQT7 =a7Il8w o˓o;:'2Dd\r'yfwb*Ĉ`] SeDNvgz1ӷ7On&dŘ#YɠcZ CQ #$%XnO>''~H׸%thvgO?2̭Gg?%Ѹ.=-V(r  eA|U!o7Nӷ18u/^??͛׫;VKݑb:3go.OT}1Z D'yvDIFBo4H+?:{wO,N>y4뻫Yş7hy YG+}Y]=?n 2 (9$B6L ![QWS/߾~b]y=^;'$}>B~_H]H#-r~QH飿ӳ٧;:a<AGp?J^϶Q^Eԍ _!\I:[աkY1/!ܣK~lISӵ۬KKY.zY]F=R}]!~^/FuOJX]y=x;^λb6/ }_ K}6i,p-HbvKXo-j\d#eJt bu&Ӈƚ&F rQW+4/f8 N}*3Đl`St46F(v9[R`It/5Eo\:uʨbGT'm0Fo͡V NMc#Qz:ܑkaꩠQ):Sۄ依SO&dq>!YN'G0;PmSi:?ׁÃ-cvlcyh ~$'cY| MZczTA@;?iֲ;m:{=koA\6ǽCBkhK;&q:xvzz1/>g3g8}[nmRf#[<9c ZR&5%(|t@Bi{Fl達 I9kq9MhF@ŚL5ijQ"8&mMQ[m&2ͤOrJN}m4 MAi_`^c4ڀAkHq <=/xThBkVy ӟ7^o`""6q{k. wkJ>A{E֚0+bѢ ҇K 961etx PGmS:qŽ.]r>^6A=%I[n 1 Yd R)1I'T mT0ĴMtBy}e72'ZLw8bMF@7⎵EPz<#VpgОU6*7.rwM.FL?(GiK[rT S]B=@r-X!0p0>1qK>y5պXq `fm"[I:d1]MoG4R,G ,3dXLxS=SzV{ pyj×xw'W/7qr1;n}7Tu1^ƙ/l#KPE0Rm9%[3k-Xݰ$D5LeXgB4LSQvL};uϚG'HT.9XGVpj'˘yBF=oíQ"js4=a6Rt:wee 3NBm3}}^a?=䍄 "AՄIÇɇMz|jϦZNxj_A9Q{J2v֦zIʶPYqk3S&ʙ\*yҤ|MZJA2JsfuT6x.~=:vma:51oce߂0uRpT,SV"c|Ƿt)_WGm!~{i1Lj5RLl!r:ReU{Gך/Wl4eS br2idL;{ubu Qu36!M ?Z 822DP3TRu sY*. ŀ+NJ6mÎְcRrQzTg^( ] N6Qy9@݀䛵}‘9]>@ˤ3" Mt8韹ݚqa c!Y5᥇¢̊j8P޼CTSNh LX` ڦ$IP_!X8F "a j+Zu5lRۂʴtlxbBeL`K"a-d#8,5ZȁmJ0sMBu!Z4y2N&0!؊ :{g^BN+Bv XLOxX|DWR(F[;cO%¢<ICgkA)2Ęв(|u=!n#Es'3zX]=NuҠc3&چtv7"3A%4mJ~YռE6nK  R g0V.cm1. 
7Ȩu(4+IpPU8f]n#vG԰ߐ9XKY{rx*^Ȓ8AFO7=;{Fq 33luKۧ1Iz`1RO  H9Xn Q$ o*C;gUy˪ڌLɣ`[ԴOY!'ÆW L ٔu!62pnؒ#4P IJ*|Xn1ʸ\oNI~qhDzhyϳчdR{iEiasOjuף믫##qOii:Kj7K߽Of^c7.fz]??uKfߞdrVՄI'Oh3?[?ntCNe}}i7-\+c'X7^|OXB\([A E}n[扟/~!W&40 \Yqt}[ NHb_c]{R3e8enp_?U/ޏDk4=%rRJ(0neT9q\C>bmn&O4ߧսW}o.:#}z>>Huh~!x9v?'^~ X7i"21$e*FڽOںgigtR"/㛏?X I\ִn*|YZ|u\Ӕ?^ sCPӡ/͘,7RKLF&Q1&O ֪dp\}sJE)0E^,J2g::!UҴ+͹Sܥ~ quqDX鬈ӂ;jKȆS }Nek!KNК猜(,ghr2LA$f64!|} .)-,8DaQޕqlٿBsڮ}<)+NhB5G[sD3QSE)ͲxcPš[/`ň4mBϹ}nkkSGT"AmU L` /Dɗ%U(7N/8Q_dU2JG-3e 92«I)Ȱ Cl5Ϲ½dq0"GhJ> @UgQ[ #S%:Lʀz ʵi&Q JBSI#lDi@2$IPq**HGp@17f,?mQ%M#aM4eeQeE#-9x(N Tq;f Y@*$|igthp w .dEt C6#C\jc"hSax crXcZX[I0|p6@$JoAKE:%5'U h`R@QXa>c6oYi1wy:m{Ӵ V7%P@xtd"e1NyU@N s8t@=PKk!f@1J ڍ´ι*j,y- gP @Y- TajiW: U ,)shMoԈSc 5*0CvQ"@#ph4pQkѸ5{8XX0qAhHj ʁ&AS@IFV#s@AV}%ܶhvwբ5˶jc:Hm,.Ё~(^/ LGqa_Qr0%`SlRzWDI+Ujpq;?.R=FYz1 ~t,/McBc[$1nHha ف/"ddP /{P:9{H%XLb#2TlCZ&8Ph/䩎]5`mY\Şa ,~gE`> {2dhlRgte ZZ:jpV!Ue؈fAD9i4~NV"i8X#$بB PzF]YP5Gx̘mjw1d>6_Q R8GLq. aJbDKK+W :'9l6ƭU;F JVѲf6HΔkPΕvG)\ XɆ\-Fʚ1(0Q$j4Hm|]90pD*V6be( ڇRU^G8@{` ?q+bY \8۝@bE0sKۭb vB/浂fZgWl6_ ةfXΡ3w,n:?z6/޿_{ PNuhG%֮OUv?*ǃީ? 1y3]l.DNf)bNGS89i/J<,.Mzh`j0!ïٷt4>;| o~譒g}lP,KB9|A>\?j[|>LpmK2+k'MGV{Bgy;kk+k[<5M.5\ . o:t^jh-6v?ŗx/ħ2p>3L6 WSiVCIX] :HWgGTo/φto@%-7l}mN{dZ~ {hr>]/p'xw~W_}9x=U6e*kصc9g<:tֈZk#OS]|\/>>xCW |% Ol%TT(^ g~ {$[K3{;Ϻp2^N<dR`S,.T:y.U. 1}gX7hWiþՅ}F:=f6nd;) gp7 4 F[*OZTdtlu-3IutfһO'5r=yӡ?ֆ(Âܦ`7A&utjs3"71 A\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\mՆ\m\*=#`&}R"oѪ/oQZ)׍k= j704v~pYN }2{S:R|Fpε$ Aԇ+E7N+{zkۣ8!i'XV1 gE uyZ1F3sC!A8x ,|Aܩٽx~K&3)抑'=)IAO zRГ'=)IAO zRГ'=)IAO zRГ'=)IAO zRГ'=)IAO zRГ'=)IAO zR?H}Rty? zf) P zH RA9RЯ͛&iwvt4}ֽ G L]L}krC)7`K܇;?wf~ti76}z~r2p{2jCiv~ˤv]d5n Wwcڈz:y?c:lQ[.?(j%ݢa޼>CgkWsDFdd Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ Ȁ y ; >9x?mGk҆TJ>[٧Mw%._!Ӽ;J[yGڻ4 ȀyDǬUOoRP|1=HӻAzc&feYQV geUo^ƽ¼Ww6GR2=<9]V}a3I)Y۱kِ^^ex{5{Qy Db6~zNğzY `b`Kz߇P^јW`u_Ȁf{4{?3x=Bo"L0'- *w=s m)R.'@Ҝ倧$2>@Eqf{ֲIxq8А~ Eia<]Z1lZ1TFI9Ƣ,upE|$RD*A^w񀛊;V0PA n ҁÕ!T,-`*Mp*"8o +CL:ʪ=.Av*^LV<^jK ֥;6ĬB <%N F4AW}A Z[ 3Isf4XD`E[9/85"jZ!5XBv>`PrXcb:T^4\%\A0:&E ^;69> a$/3" JDfr) H-z_gA*&s0= eǫ OQp m e|H*{1BqZp欰 ,dk&?kHk fOȔ|z.*}?ndRe<"ݞu7L[{C&"l5:Xj>/鬏Т<*IV&[Үr5#:Y8O `+>w>]Ϟ,߮4 m{ѐ85vAR% )B]; +]W>J܊Bܜ,R2.uPrb?\J}_f1Z755eu7o\5o\5o\&7yTHsu5Ӽq׼q׼qM7yH[ۼq 6o\5o\5oyí67 ܷ3};s7\V|.|!P1jlBc5yuK4~IĀ.2%9=JE\ťmpc#BAm@( Zq"ZΔӆ3yOCdF-6Ա o?o?7o?os^sr%EI:]FoD&Kd$9*]~4GPGmm.:K.7$+J!DEϦ~oym+zFFC>ll[ڦllOpiQcφP`d5jdjgNTa$0cKؔ죕,?T(;^t>PϪIC%!x6R[OESq9$2LAD@ L%Tɱ^,b2pd'RRV>%/ <5Ү8 [sSZmHH#FF 9P)hv._cFTR:i)fJ8UQ`"GC_4o:FAJϲ[s>UXE*la\ه˕H* @8P>$mq22_Tdr- /&WO!W3LՓ9z,Ucf1_#Qbࠊ DBYT"rG"fRah2}td/:ˌLvLVNfgZ6gk ~{vaNbO{J6Na66`KȫGC~*S|0xݷօvd7_99\x`1pfü qO@C`peY9eըG+|,j*ẗ́ d6K,:X~0X@oCU[?B #h NXd|Wjg+>\|c!/lpxĦ'G?~Ӄœ7_>lfsvGyn0bX5PIZD*:# SFuTSAyP`F!#{dL+f[!O^; ,Y"rx mJ S CHZg+YϢq*g |9ۅ>w~!Ah},IsMp.]$.gX-aQnEw"!#ƘP[wX&qOǣ4)Kch2ׯ=mqR@\txm$E t~;Ǽr h\Kß|,|lRlx;z%c|?˿w lxhOUJJE/aj`xv:VG-Vu^']]^kniHꐆlkŷ׃'սshԐV>5%&fF;:ayseqxxJkhy^"ͽGXv]pUIŦq05R ᱧaJo1A*'#0Kյep} t:\o>i.\Y$7zYjݜTfT7cH٤>UF^h7wy?.^%5!wDfx Z5AMR 6 .E6X͹R:$  $]QyU!x%h`s8`ugd\&!9BgK2>*WJq k@/\v<Ζ.34͟I%{t]-g;a? 
׻ +]Ƚ`"#l6VjL$N3i9@ўEv7'HOم 0֥EV*]-7&xK J %XFEm!'?uŤQ8|xqmA${!BjY Ca""2PHhh Y  UV]@v"ɽ2EFlth$c!y@,# J*(PR&bnrߢ:^?5p%p ɒ1xMaN}Si-Kf5 $suQ509ӊ3rfiHC Ž-㲛7i- sO1u+MOB!u8\M8ܗHe`e]^S.ܧR ܘh~6z.@ X : Jt Y ^fJ^Y:s`oFuZHӀ |w^?*}8p>]PHP|owD8МwYh{'Û,.?z8XH~c<;z Wq_>%hL~}Ǐ18l&r@[$@,2t5iʛ}-{tGM*,k_XF1[bD' I\_dAsz|<V9ui_oxMhQJҔ \tZyG% d䴙*e#o,z!42f2s{lS #\kP֜6O>,` t00FSozN|vƓş^5@pWsou' t c|D'xƳ^Z7xw^"Xp ln Ewd+Y=O%m$j[_fdu~z$}7wpuXّXipJWxztX'ΑWbbmꂫsO> n6:;2e󧖩㒅9M+)x]~¨w~Ք6,gv OF؝L'lm-Ηq;Y_XssC?O6wdSWiHZ2%Jn@^[_-d@4)[Yox 䧌9!m~/ N~Nx,{Y lJ|psWM`>9e'We6LgY6?`Sh_m|7MYiN^Ӕķ)o7WRO)7ɛ=_ ?hbv̘dQ< (1 WvՐZ j2^bRCHchSփ$RB0% R%KxFo0-h$v|RR7IG@S2IÄsb*T6-SBthҕLjr9`Le!5|a2`M{KTTgGA#g@'v()j5 IA`nr7awr?8vbzYIW8c:=42GW'g/ %㶠0a\Z_3Hx&b检o3u CԎQPwAv|R13P=Ql4I=Cxz.Z>GH.SEFXgyHU$Uq!ђ ,,!1쉌D ?YXS+9з䴸g=P :3ch x)& %0 "GCIjo^tY&{>l`lu0*:@ ڪۤڄ8] ؽ9cr;>uuj ڽNʧv J k#]8Ӥ駛i҆Qe8^FN'9koGXΊ%^ЗJ:FFw[2X:n]8j)NuOx?7pڇ DJ J+|UztD.7c9L ɀD)PȄ0er{:g±Og ^>wP0mV]gK?kCmCXhٔ"R#(NcEhi6.b.*T4׹׹ {57~@۱,xs:2BR蓒h&Hedi2yS ZtVd@kyt6t:6k E. @{9Ύg7 X{!]GwWU^\ښDASr霼QZA1%r:`ME6f,ʹ^g8Oz&ޔzox:Ouq=Vv2S-!PdkHۊ|@P'Zs7Ȁǘr¬]F%BVX)*m:$>xrD.Z$:Ig2eKxC7иR:6;sq7Qxؼ.ee(㼖/ӋM,\_` ^oQ|5z` _ 5V;3p=q 0J_[oK9Og/XPIꜫJakh#$QLTHgm_EX-eϳ̏a+¶dQQq.ea$5eL d6, ~e /Wc@4;TFsuxmsH6 ϫ^")TL10h[F3ѳG @CдR- rEfgũj$,*Cuh*?GOswi%ZvkE a b٢1QsUpMM%G !W Py1vFC.&c `*hE54s&}*!ThM*F~HD`dvdp.Vb[XX-&)ܨP$\7SgӋ'WsnM[ӧɏJ Kg.gݏd`*|HTJanIЪd،jSΛvzh~~6_zݑJX՟^~8;06DD~ޭZ2V'?MI{MJzk_ ҡ/pSfO<; _t9~nP; ܫOUl;WH?}Z $78ͳmI,۹Ah\uNq/wj4to`UUPL)$D蚼9~բW) ms1&}H:8@B9~jBJ6}8_k= {޸iyz> (MPUUHGNmؐL\SҀH!Is1rgA ɎHL첨$)ZejJ&-CPl"tJf,UHFehDTX ]Nך*H[ 4{訪ȶ͚L &WxHã1,$_tPN0LIMIx5~K RR3)ʕxyT aU^[M0S2qY/ {Szij*`ps'MAD$Rdl%2:ɓlcVl#۰a<3u "><dRIJ~6-If $cxB!('qx0構\[!乣#<:}I^[e^^u`$30eMfU`8~a V!5a' \]Ə$Lh37X7` _l P4tǢא3Hrj rP :wu vBlS?& yOI' R VJ;!W!d`_ѣr{ɶbD,IVNDHd @&oZ7P䁷 j%$VIoPexIf-^A& D3 T42j;BmӼv|N+;F$>4aםTtSiIg ]*PГ{YVm%GaC'I):'|$ΟR;4`PԜ$:+-~Ybf1TE V+ňctYe';-|yPwE &BBZik+(Ų7c9_bM?:VcHK +cvSthI٪bB/)zku5ɘ3X͗;>29l@]C]e'5x@V\ӄ0g+?FD*G%"F'6YU3p~ C\L89yk0$gKP] `W5v 8O}|no!g&yKSATbNA%AkF*%$3C5o~/W_Y퉿Y0ƿĦ|:ۅ_WrFK}S2Z M>8$Ądy;Ŧx:>ZrJBNhTDH1 Bƒ $|B›**],|tHeiX\\g瓹ȿ/{˩TDNR ŋS2FT$x[Lƕ:2l2*Zy̅ZRTROIXrb Zw[.ր5.R1pNCY$c4th.GvN֎~b61xS@r[IWg4FUBWIΨ$(gdUlIQ`"Jhù_22urQsi"(P`> SW6n߃deQhOJlYZ$8j;=0(\P$DvFVfl LTmxTuU;=aT8ɢcfam8{%d18mPBFCS%D,!8Ix i$-$;ΰ!.Y+~$229sČQS AIBv!!?dJY2ˈܲ1 sz}.{ÂZ1`+i&AdȠDs*Y9BbmK=[їn6r!H"6}{Y[bjYkHwK2J[lź dd%SjvEîU"5BAi턯F դjd:0p)˾n<ޠJ'B;mjp֥7kJFiأ:{i ֢C!LBDe&kܼ:_5g$(JJDdBFh) )JVĬ2Bj:RzT^bٮRLs͖Le%a]㸽Mck WE#Kt.0`j U]&b}enjď!}2ϧ'/7nW~]!*$[\1(LYDFkBѲv:k-~œN`a_YZJu>-[)( B((T|KQTRI18zo0e4^W'5ՈmdYD_8;Aߣntq{\.oy;1 Ŕ EV(VH@:2ngзNbYs5޾Ut?=n.[xg^KbvD~nu$+ +j[n{#UUeBH̤b~/RP&y%@U(C鎼Gl-8*!+ww&_ҫҸR[ǀ,?ueN[ -*pzM兘vwj,Co]q+x2y:ڊJt%z'5P2f5{rOYn҃o5> ]`vyǕ ?;YhS*_c{nX=-ΧZo5UtUyjx]Y(覑CֽL^A9.:atzO4ХzUZ5 hVO57>_čjtpo&i^^S\yָL3`:ñv*{ ZRƀyr ^s=!joeu*"3XRTWK SWQϚ/Tɝ%WPe\^wej}cH$!_Ϸyٿ >ԿP=l՛%z`'!W,b4⧴}򁣗oPZW;z~h֣|~xԆ"s<M9U~+Kj`fe\j̞?3u±pt́q#.[)))o)6C6A%cDc2Ζ|T|דٲUV\3lc{[N=ۼv7VN>:Lg+OD:uHW)/iVk΄ZJQ¤!cLe~kVZ{U<czvxB9>ç\ϰk2aZEbB1L>c{.VnP{&BMv{O~R=fVR $0X !X<kE%%iCV_ lr 3#E/XJ,|]~ 3ɴ3|Y<g$=8^] :<،G^9 I[Boꂼ %ilke}pDUX*%j!Ctc8-<[Mv:tẦ~Cr1?"Mz݆N5z|. 
(]\tj1=:[*|X&WŚ2jMW¢F ہ|%4@ƛ =hZrIRVE0 ē]ڻ׮:Q MFh%ho=IԸs~s[5ֹX)To,^Z#ݤ;=Nnb`JDr@@R`PEծ$]׳<:%}KGe4Ն?EX|ޅaڊO~  )B%`n,AX !\`kCLu0lp MDTp%}Ӳ tw -09CqnlENxża~`Gl"|ro2DϵVw֡Ikw)'+4օTHx][ G`&gs*m[ָ/ӫ17fLڂ@X_[,Zۜ1h6+Գ~/})FltXN9{'5l޶"azD*=6cv1 ,|X!")Vȝwݏ3b)ũl5!E'a|JP4nJ쥁X D&'Y 2>d8v(ǘx:)܅Mk&6Bβ\#[< p RO'%8X1K}0SS{zxW`fN쎭l/ Ť|.6!]EA|#1BJERs[m[]D 1Tق1cL> GE2SuDOxѕ]޹K l(EْN>f;οnbFLkBm"7_/y G82U<fFzrwe%$k,3di٠*JL̚I]lp%0yWz9El j%)RA,OjAr>yUa9AFOJ'HP7"da[iU|j%zI "W (W*c9˵0I'GZxJx|^i0c5جb dJըep|j;#ebUvҘ/#BpAxN+qҬc>yLBQb2(\SL9ZC]Xe>R^1C_sZF-Ol*ň)gP22%PѻhDZ[>aei] dlX^[ezZVg<2SedloT )U"*v.bF7Ю@x Tb9mH5Fߎ-k`Kl%;:VeeB/ke;o?ڳ6saY93Ŀϻ+X oxIss1$qF1W}4(IpC7,*lBVLfٝcEZsK[4!{#=N.O)) , 1㲭 TrDl#0Mi߈f\}~H|VmhʁAݿz[r3S>Qh۲:7KŢbO'-UmyʪA?:^m<] OȦ@u>2W4$i[-xd0K%yz"碮Xh579E6fzkPLܼ\2K6 =}u*h@ǁ<&Uo2Rp@ȉDlĹ/y\APlrP ALR}A35VgznڧQ|AnDTy gO#BD-al5Xb6]TMjK5@nycr1F\Qynzb '`r)&.} //oYt ,lsFv%-ME\jv.ToX}J ]=SQ➆'0^qG')ew40"V1Y|鐲C)'@-qӜC:H54\, gl"45%wNZB5XR25i RCyN;k)a;Sh'gcuǒu1)ŋq6O&X8N#cbBKbY~>!(2bG}t P`Y{ֽTgڍ#>T}̗obM>Ekdls6+J͊*'+mX3ʽ)J0C"+ ,E!DaFH8<@5ᆴF-_|Jr6ay}tZ@ZNW(,1ňju3h_oQ=[wĈ՚9qv"nMbuuPehj`l8b#l*ȍJt")fLIk&ʡ mwzgm 4hW%6@t!;dzMG˜H|Byꈣ(bc,}9;5{e r.>”d٥f P~ QGQ&#jmȅuf83GLQ{F~8ƛpt9C;CW^lx\HTYc2 %RS q<8A bu\2『Nd9zdmc3 JPɘ,[%g?L vzG81eW4X@bDBC>?4lTH~[t@rŹx"0 Qi G@Av6ӳFwǚ4z#"WHJ(I&V,l k_s&JM:Z`ZF581lHRm)lv9;db.7Nwk"E<$惾o%$9`V&Ea2= iNPa5Чliaf*ړ [hB^e~(Wx(3'u9G$I;.Y۳d02wSXqXqWNܣ:LRjQ&Đ J98&;1S/{n8n9\qWNf(O;vHu .ފśig/W@eY4]_9HvUPSL ȜcFߓJ"aGf;؎Ak9hm0L&v? *ѝ=%ZB)׈=[b;e-wzI.wۧ(2Bhq8sq]RN^ϟ/ b* t9+c` pABt2H d0Il ;Q;%[@@k)X֕\gÑmNWJ#jF{#$Ma}d'.UR}YG<7Jf{&I>O/_g/ - &׮C7˚o桴xB@ ]P ՗3H,\*)6 qȔ^x^x^xަh[l1T(.)}LY7̾|*JJ9Q3yTc㌃~9&Lsp!Hf cHu>nDbLp/!bSJ*Vܬ´~Dži^Dݽ{z{ہ bk*}ݤQ0\QOF)&1L 2spF.l+m,=Gkq06ZK?hkf\z / > @h}^_\">{N.V9P%rPlrom*j"z![ػ z>Fo]uy}Z}~N[xHn@8_31T=GѧuV?X+\ {LL!!}D5HUYrKrOz>|PgB7 'C'c'Ŗ|_OٚufVzU6ZrNoy,ohQH?}-& .̊MD9=tY[']߼^:w{@t~~9Qui)QL*jT:*~7);^w n6 51sUGN:PK\qܝzS (:QH]n6(o`] d^Lg{I$JPZАA6A6Et7u0fD܊Z.Ĥ6DL,k3AsȡaDUQ.Whk M; Z0mA"߀![]]Ӻۋ7}YD˄Rf^fXVaB)cP{cuKݼE^pPkL}xB!S^7S嫳,/P/TPd+[ϋ{֟_,Eg]}e%ϳ\dLYA݄ȿvU~w} o~{s5S JϯwWLYoK|/1aYo/d:lU(T)R6yo.|ҖyTMR˭3yAL=Ѷ?樖 HFP5=fL܊q4aP@0qQttT6ݏ)e0lܗ.Sqܣ.yOcfA \펨a8zQwCOtJ !+m#I.#"3@cz; <%%:^H")[w)S~ ]QqtCzҒٍveH=| H[%fF+@FGɇ>X}d`D P#Ĥ(+#,rP{)>B!Ȣ7Y,쿥 7b$'Yxe)LUP%t&r59b%=D9>,QԳY9<Y"^t-N*677iOLɼ+ܤ;ߟ&O|{ Ѹ᷹yX4oקv뗹YDR/qt81꾹:9__MgͿz2" ^ufIeY{% dY7Rtb(%*WkHIL=i¸=p ?%jr'%~.Ip\#r[ʷAaJξGΎZkX48l X1StvEؽvfж}UP}beTͩkC3^sn5:X+yqM["I6V@ V@-,H!;Cm'ި 8斏(o-;~Ygw/Nb o~ط'$$׌61WոVb^$S4 (Pۚ㡶=Cmm1IE:Pa/I[UJ2Ruթ*j6V%Ѱs<ʚe#yZbhh Ȑ⏁R*w|)H T^qoݐhz|3U}+k0o6e;=z6cQԴשPI/\#=59 ;R".v_|]&Ml@2! *GTliԒTC$/1J XA1^0HC8oW{9}G<߈uЍSޓX !)ôj4T&G|y>-#ߒA[-ttNU2׬Tpz>{eBNXb)O)gaxnW2>#y% b7sG1ENVGQ./iTm+ "kzT@ U5:drl+d puJo3TBMZTR QP@6Yi̯WJe y)SPPCcaL!E>@uIi"%W"ͳW[=.%ž<$)J Q X8[CTwVo HЛ ۔1uU쨦*U9a!Rsp'~۽-m0i*U3Q9M?99?\}d9 G"Mu-Lį_w߹C2/T!V^w-߽.M2_^XaXUm AKആu9#h^pʻ{4ڒ+Lt~0W%(6Ai%9ct+SVo+SVo;SVo-S1$W[Bm; T(MmjUƂ%f_ i4oGu"*2AZNT+y2"b2ʲ-5k&83e{3eo+Ivm+ kh%<:%r4Oả SBbq4fD ~TA'GC1[v#!dG۠^6g6;#x%ANUi7_đCLT%$ִ`lńe=ntf[ٶn櫣MlE]Q9dlX+Zp*c1{Ͳ `P_6R_dm2$}(PW[(%e%A4>DZ/ywMۇ %U0FH9 Kf&0 # ݭoiƷ6pE^`2ׁ-t/^?D2D6%4m`I2H! oe# XKxhTXJB:)P24&2BR@bQ4>uc]f-AjXA3"WB@Ɔ{Û~qzey,xV1s<"ICEW`3! kƉlUi{ PHZ [rPPlv^Ոb*)m#D {Ewkkse^ *g6?p<ģ2K9WW<dfE R ɵ) AHTSPʘ#[&zeJDW"=Bo[@DITrNVImr*chu-5Y )2($v\Reё*! 
W/o[!Zo-dIlT[xK%[.avƶHrτqfn)JaryÞH]H >YIh]h<c=I]@::mO@*IšTM#>y}?5}Z3/9TPZVXÖbkqAgj=C0TIH.BZ%RhnV`j,H_0k׭]PeA7{govi^J| PaPܝ @B5@&S\c_ icy}?.tO`ad}|y58gJFxH|&`DXed̖vfZ+Ӯ0#ʺq0kUwEjK߶lG\z@9D=%Qt=0GuF\奣̦֢6}ZM|S=twY|5)**JA86uEMb-gWt'tf9\oZ`ˆ$2- F@2S2!\'(4Z ''ßy5=L=~hZS[^7p=) "19))pNYYJe^*:#"Xyhm_W L v,ZSCBȑH98H$dfy.oix |K, ! k:#;~ n1\15Wr56RrVrr\SaSD=}8).vWɠ@z9Rwsx䭮[NywsyxVfʄVvh݈ep`+ϡW-()bfqw] wѕ10gu!J6K.q{+ur-fo >/7<TجQ4 f\Va`03yQj^Lۋn= bSޏ1N.0 ş2cH t1QH@q Řa}FTӑ^u?ؘ]B1*߃lg w%44x%Ui6:F joŠr`{"s K*RI~b#|@nI.r:|z J"C9{$ j(L\X6rV#!Xd{ |)cq h{w)$$񜨽\1VCSeT{ܴ;/cۃ! x7F>Ϭ];?}/aJ;靈j{2Cc89ƹ{H 9w^Ny"A|߹ۡxix !vN{3ZGC*ۧMm>REӃjCe )[0;Ubx7Έq#UʎܨzV.6I۩^eO鳈;P%:Ѽ@ %N+ pHfHMg=̀pa))b$Zv4h޸xiFds.1Z6̓t9h;Lum<m%<{o6CX蝩6 ;aW{!}w5xgd'OvL6#)h\ ~wp!\J@h=Id_}3?֥c9՘xe!)L癣Fp%3d{=`Op, 4rŇ z-oW&}?R4eV6F(eo4dE"1L4F{Ae)%dyXW+c-!|L?XnX|OE M50(%wvhR10)#oy7~٣:)y.K(aB I >\;wZW"- !/|,1N :OI).׷ja8f*y~V'F_?j+&dZ.&h> 4ȊV=َ1*lZ)t o]Wt-4v[£M Xn-0g ƹӃlO^gu dxQ}qX%'1 }GD=N<ڄWIn~ d wdy׉<8pdL 8]cn8! 0pNy?[ "H.Jc16 q7f `l[re_>/]1^s3`~E~cdy+*[Sozߌry&A^uw{ՔoHv&~u,#-T ,ju2?VQqю2[ڤ') GqNMX.q61ľ}<#FhkI~޹VSQk@۴1m@9)!rl=v^@:@]sذ ؚ\1r& LZn`Mt8Lڸ ' ƆoO Qo8tx݊`:D<E:# Z^/t4*ߧevzyRAF=I3gVː(S\b:3dᔪFK9G):G'/ ' D+H|, tn+o?/T9# ȄXVe_x cK`rqP0Dl`^UaArj?$IYwܙ{7W3|HdGqCJ8<!%Fʐ.A`"R[:kVqtQadW:49$}?@5zϯ|i EN,C>CZR O%%<: x$ȍx &Lpk}Nv|X]c$>m4ъILJ2n#0CN\j.јG DRE3q[c#뮈=-!HT\S<+S+a8v[rPQ߳K?B>5nuXAqXk䈥q6:'"X2tz59O%'"ia !JԘ03ʉ&w+WguU*i!%3OoD6uǸJ~Rl?V EMϊE!EoL'>d;ysL[r}~{30a_^NAߟ%A5=QN.ahbF(4rʾQ8{Ϊ5hǺqoVۣIw24:UK6a )I JDD \+V,-I3ܬ!X"_dg܄6eV(bmaC!sbWEbڧ6ʱ-aR<׮0ȣ&e:s{R?T*  SEF5F՟e+@\ks0wkcN6R'oWsͦ' 1w>"`kغp⪱xZ,b,VFc`VH[;M?51*.P2`jbרDJZMn9F(&j>Jo|qÖo^\=^8?MWo&zӛVjo~z~Y~rw?(Ti# I*ɍ-Jf\_zTߘ"_̯MY>kV4k?~lM?g+eDbǙPMy#e]_nGy+M7|*gT-Wj,p}f?Xvᗄb?{!NLJŝv.IEo~._P7Z_Ʝu2,C93pj uNidJ32HLP D,SVmBOZ$6'@bV~+(HN PT0s8GD,͡"9J}N4BΕyȧ/Әȯcޮ˚>wL5XպɢZ˕aG5;S~*.Mf3#l ~a}\, #CclT3D !"cqi+ ?"P*JEAh9'&B}!2CA'? ؘզy- XS{R!=_,{m/U˕j]Z \4_ceDn'4fu8麰' Fl~4Rˉv{mf313^XF_3&v-ŶQ>`%@ ipfia}BmAsr,Ol‚J ֤HiUQ_E'p.FSSi9y^]}vO Eq)4󙃄j*{M:8DB+f$ UH)0@)Ւ* fhE ~@YQRܭga*UyM1XS01P A rIL1g`ʱE!*|*]/v/IK/9^bYjV)dueDtiOϛJڊ\gfmpoNߔM62pͥZo+1g^+lli_ Yw258?[jRM@3v?д,:F>MןネDΟ}qE|3Je /F-=Ѐ^nf2/&~߽O_ֻ<'%b?O?+8) l)Ny*Jo?7S^WqzrQ92w7+sW"r% }-$`weN;)nolySS6hW:Փ7.fZɌ9#ژc Ԅef#*OsvW ̵*$Sp S'`添i^zK [M֛bv^ofzz0.7ԱL7,~k<}fWI94fke81 JUuv-U9)$$մ.cT@yRА@Kg .%N}=X8d ];CB $RKWbF`ꆎ8 kD?{o0+V?(#vcac7 *cir]5%(A% jHS YxrgLKUcJqwNA)8%wmZ~ k]{~-n_.8C2V"ˎ$q/%Jp$') WÇ3ĭA;b-߾8KPe}iVX5ȡ[.RbqGs"d Z +%)='EQ8 ZLGÃnJ }'PXU{(QutIP"C9x)t(+doE\Lvɼv.zrg~K^Z.?l7y g9D ;nuY -fXF%#JA],с!ǞX=ѥ͗KFsȳ9'Ǹ,gFEs|E1+')9j Hkz$eztYXj5*@(2>@![4^@s2\YF3 /3TV ΆO]8#rNXJZw.<ƦJu*ǃ=4@tpu]TNJ.Mֈduj s06jr??pHYr^v.=gL#U+F땉7+LFM i#k!!56u+[w XXB/}Hp)w=4]K/!v q00Q+ t`lIG>dYZzJG9p&(49Bhr's{0rVM :ꞍkSe7pfPa 1vk{~hd[eS)"~{{ۼL}[ 0]̧-"D 'Qu4XFK)F#t9I/'u^=+1$ ;ߙ}j,NvȦLy.$UctPC1 Bϑ ˑ[֞ҍy McqDhq#Y'h_  |& ,L77 kd J+9^`ٶ;*nf3%<%8[g0&LjɎtx'caWh'ie A CQ64lH,Rꨭߏ[Xڞm.x&FR#Hk(vuSٷ=US:;KgFthuC9h*ͨ𽳩M#|yb'MQ<K&)'wfjsIGJ'W ,ҮN0jrClVƠ ڎ-y)AXRKJ!+2j4=|linF ]r(jYO+ҺL}A&vvXLA3%JRu޶gX>m?B9 pR SRVh&l[Lnza2C7C@eEb@i 2|؆A#rhq{5G8slW{y-+I_]lOD839tA)Q" sRµO!vP2 vnkU#i~ŴRfUQvB8M{PEyVM:fV4! 
b?))ߎҦ_+,5qnUL6^]~vE5xtVVZo4y}V1iz׶$ !{i/0 e8Sl aF&îH8 6ڃX+2hM:f?0% )FaJj4#gj ͧR2ӯ^[v)٘74406ѿ F$b;l"4|# \Zў)B%C(hŠwω.b62CNuczfU8!aYXPa!r0*Ke{hڝ-?_x69W@.u͆d̸&ZK"S1,1b`EkSLݪvJ\ H_]J<-fh}ۋ;p)kwqn8@t{첼^?߿/m_՟O\ &h$@ヌ7`$`Os5fqݪAMjP_-|C[oӻ_+3#HŚ/VTvypߗ ky7&7a6V]%ߪϷWw;]\ k)] L< BKZ}>_wsef%[r֦Pgg_MhNj<1>ؚ X >?X//Whњ}sYQ9N+ݖ~}[J;Vң/oo?]%O Up˧U^.W0G Me0./Fh9]G \t*Rl]\/wle 26̰lyQ2czO-0p *'AXz40X%mփ06l%aHo~o߄bWH i8mnûG Dگ]g7fm'"I#ύ-<vom@ȲU:#, Jc@SN݁ԑ-07[-ʰ#1KōC~s$89UmsK-88fRKx9Xh"E] iCcZ^ZρmAò B2 ǖ?HYhjH9 `YN$껶Q/xb s/NDN/*E ,bF'ic\#ZjnvA4Ӟ!n 5|"e\>dB=d ).͠Aʇk*UaKy-(_Ʃbhݕ}V V(h.D!%rŢBx^Ge:!+H0ċ ΋y%u J]ţT!(`>LmYw{|qw['sq/T罾:'I CtQp]]O|S-`=4Zܞ N(㣚ӖΧPv-i=1xԓ="Iӄd`cH<FRvv@>l"![s 5c'7ꌈ(a/bD3cO숂5d:? AHtYY0N8 dԆt! )BkVda58?Sw(!>rJGM5DV0IQpq{]N ãpM|4ee{_2^!ZZqM"*|!Y5d\ dP*Y BKZbQgZƑb4ۅi w[|dN(i_QeMit?& KzX$8SRVe ,42s `o"WTD#<_K?&N,LnaB.7CEYeUnFwu7rtxy㺾TsMWތj70㏛_l_*#h^|^|O)rV 'ͣW5d I6TGXh9qMWQ}d6C􀑽+ [s# |RD,EU HJXRalNܡbB?K c<jO3r  i^YkU.癵gN+ (JxV w eX|oWF^o Hȉ6JVZ1ܢPZ y%K qmV~47F'R_UĮH[%oCh\ۣ]nrZ"yIHSJ6bevmO(Z s#˰A\f(ϙ.ܐ*r[[am mm, #*c$?ܴ*Z[Sko"B.u* ogsQ{ d"e%L!UxYqR2gVM vaUAk˭+gR]]g8W5vr`}8X`x݁E*jV[g_pm-*̤E XN,Xuj,iVJ׼WhyM(Az'ց~ d8ՠ(p? gAy)<` Ҕx.=c'w:!*,Mz=֤8YTOuv!VcȵykD%-Dvl(7\VgIZy|JXb:%[Tiktm$>rmO$.~DI[?8K i(Q Ec\d}I.itt"{T 8}1bzX%Ky}bkO;/y)fxʼ,~"mJ$xnRu5>T껸 ? s6fnݭyĀ(KBظϐ!"O#6>#ɰTr(?[oyµȭ !^+L_dVi(19L oZ9z)H#mn]aSF-5"s&19wXPOV;nnu9@9& z_YA@i*J.9 j6q2׿ "\.;nc5QiE6)Q$64i&.w]Žy]4U(aiN=/UWecasUMq}HOAqYrN}P]#5:KRr>CCDBTp2ɉPvkQaIݢ°  ׽TzQ).٫x&|,00yJ :4J @RT[A2 [f67uh 9fOdAn9QHtZYDuhuw8GyMNyNy.SpT*YeyCQgUk,o˝n1l(59Wr)NVfZRGi ҌreVUFbJ=hpCcogշVQ>~-yv^'AQ,lbB70|o5)e:vQpZ€jU `ʢ0%i+A1uhi4)UBi"4D61}Y~}GC ,P1G"E%&XXA6MYCp{{%BG׹TIs܍?A2'+ll D B4٭vZphn}h(WF:!Is #.vxE5'$yp`Es[nq3~Z 3,Q{7$ . ;qW-QW#qaBIV}m~[},40Qe߭co`vr,v)H kI( W. iεt|QELJ SH ReѬ LVBw~+嚂}w{]yD2 d s`@71tycYs=y-0d.Uą wR3IQêxM wkc HX^䀥A+;~h^u ~!4jРt. ?TM|<7)ZeA":B-0tXI픠dgӞ 4\ T33\$gM=_է;nu] 9ugT(ė ?smOS9lN4DꌸOOE LF)wZ GaWsَIlޓ8V[ C/&Tkf{ RwioÙ,jX[sJ5 JOSA~;waC̢v"6}=3Ԁ[h=];a>e< (9|͆0|lK$$'[f r*\OjS3o ɴZX'Ic/!G)^{-g/>'7t>]z.B(|.pꮍu>0ꄐuW{6`aM:Cc^Inɯ=/x8q*Wqh,>dH<'Df|qӭZ.A+LNJw{s$Js.K E]42t4;dX[ w1 o,䈻 l=v'  c s'iT?Xo@_58^ÉӷUn5=dYn3zC'ݿg!CDT17ʫz$;CtIݭn57j"Y5#d Δ"^jk#M_ Y<:*oBd slqh?M ma5W|V@jjdou&9;J}]m-|t{S o~=!گOܨ\,>\T7ʎ#zuܱUIQ~;zH,W=-mX.&Wo7eCyǷ]&r\S9| M'ez/w g;.Χc/Clf0;/ibs-.˓glty6?OVcpk 7F*0idZ~j^[g|L'w^2cko~+*W/&1f\hݏ -ws{ =F9\3*YN&Db.RUVҩa`9, ?u/d˗;ravʯPb+XVd _R8.GLnEr4[Ght8j`lrTc0ɵ߄g<)l1ZCaϺl ԯlaZ:4w-6[գY RhM$b[RҖ6ϴ2Fr+Da4U/=v{/L>RlqEsocǻ̲T0«-Ptܗx><=3r+ 1XGoƍgnW 4^nW#WFqv Bv. xTFNF_6WFcge89FQuLVo9|10e@?.#xk4p=Wy7Y֙-E@ʵD"+)%`iS %Lt#N|b 6je@/qq~@7:n+-za?ݫE@il?Y`%]J@UE%YfI1ʬ{l3*8Ü"d:dypZN8h/1Cݠ 1===!;'#lƮ_?H:Kk^ߑKL {rwCv5 q p2k#c̯@] \OB0l4+GYc8$+ishE>aMMV'I 4lbKrWØia.xZ+A 03PZ @JTP9=ЯԊcp~ 0 . @orj)JMrղcq۝ϑJq#+Bp_a ]d\f:9VF=l)dɦԤ1 H ǫ"D%'n'jͿu~W>4E']ݩjRg\AY"3,^eBLd}tq yz/M8W298%7#G1RDɨİFx!>gCY.A*o}J|H=Y_kԠ'鱶59^ci6($ѽ!lpP5 yΈ1M'& *rA ]  ,Dmv'1,,lFa+1?(q<yX"hcTTe2[#;L $0׉ā[)y}K"[x~sQ׊7mo˓ٹ^Uvt%X :sf!L,Yod8C_,⾈:{6nTҳVUۓhc|_ FS@ש:׼?onUҜ8VQx1 syWu4F3?Mn?ejl}vtSƷ|[ P]WKڳg3?pv___ogW]}V/G~zsGm8 syU ]C$R c\D0C ,{bM R&cLne}dkZ+dY% B"=$?Oz?dx^-6W^kaB9_17w4,hYVKd'q]u#YBz.J>R&_8r*,*#$Ӆ`EooیB ݹp ^+spq)eECM7~䙆fӠ2xY.(n4Drc3;+W=XJIOk ](A,`"q4"dUuƉgL`=HcіKB%a?g? 
厌#՗O,.")t@]̛݉hjMHX0L4F0Eȹxufigw<'y@| 2r|ivu)E46-✩nq Iсg86[wSDT_I" Ń7lbӏs *B@08ds0mTJ9pg Qse]A808My^dt}Br7q_k IMQz =uc'eUݠyn5m"4CtqѱpSbU  ) A2;7)~"Q3kԼ'IDT 9) +sԲ<V\)%] צq}BW{M34^-6^TȞƋ6 xKm C?hcYceC-Kgoo #q%dy '% фC!ZYvx?IQ퉈(<1,x#3ED30$myO"6R#DQJy"L0Vb3w2 1DPZD͓@FqXLEђ Kh{`ojVԷ"jVF)2`uwG X KmQP 9 nyM)xʁcRL"c*n."x:!nUTۘ `6E&DoMDf ̘-S 9 zf`T16"&URDH,8%:NRz0̼ })m:,y@0Q%"_|$EfJU]f瞞ݨ'h1ħ i v8$G[iAXE]i^2u;2@Vw̅И?8f28@*p_dWy}6. aPrE(V hE<7(f{ƳնMM?Xu4Ȕg7$@ŢhY`9 XFwTd29>qS@EEP=N=\6-Yt0AF\df9LL9\vUXb2b0!CHD%Q!%%R `q;u%rZ)`?uMptJfR^|]Ņ͕8Tmnx.YIrW5EZ|9kɞ/pC;áK:y£y;9T%hb3;Wgރ>ַݾңΙᄒYGG֟|t?^\naX+,Ş7{=$AYOjYYYlLUlYKg;#|]u/-Z˂6Rȱ{-aᱏ&j>zgIAKÒ45uW{'23)q\f>Cas,~Ui<AOo'<~^ʧ8\ep]>$>h++꓂ԿӦ0d,6d\yWK}7^\<~iI!~+c%͋мAwun⻳/ၫgMA|vkogEL2.Z%JF7|ɖ ?ooj.[~/FPÿ(pv=|d޼5N_GY G_6CKf Ԍx;\@aEt1ێCM~Y1>Ekh m*uwsKd$ bSMD# %=Sd),~8~vG);ͩ ^Kc|t& {P⌳N3a 8҂ qA3pv׸@. 0S\3"57tc 2HXd:L:i Gp3>zyq Q|[_{g<<  ]KmmK2>ȧ9{'7xo#'[U#1 q?Ѓ =p)OYz)}ϛTXwr]LfW][t~v=(,F bfvDL?_A)xyYVTwFqb[1*}68YYcv2Ffk<{ertsb{OP<)稏)۴ Xo xUn3 GCp9X'3$M4}'N1pRДlC8S 2[&/]̀2^eHcu2o/Fmm }"zGWԼܡ w"qMi s AHC2X8 ox5Z-P.SH! S>0}”\ήG ڽ!GהE-@wlz8;+9 a{)_o[}_H(9}H#"ĩ+׆EX3>NG|ϢZ%P4L),3Yvu쇴皳ɹ5?n݄s==~i΃4I0!L/NѪ3'/o؆Ŗ+4RU5QD fs]V:Nj[#vlGn1HAՈ^܏a{K"Ihm98l:ӛ&[oq^e2` O/8չ/R}t1+# IhǨ2(9 YYjߏ,Rgg<Ҳ4q f>yHbl^3 $S8S iX}UKS|4F+L`-z"d0%un-AؒZ.i-Qլ۳?\#|g8vz92=:54[aUW Y?);wbbͧD#ΊKY򑝒{xFy4X1bSZ4?iO?i7@<=xw7`Ru=pr;Pvm?-9g_/[k?f\,~`ac݃bQ 0ADF|%㭏Cwbv)O>e>f ȤFK;=}?ee^#~; ]),zo ;r$ZcLm|YB Cll}; ?e~SdN?ס@PFW!{4ϯ(\Ȇ˰ IBPEP$p'iæ4Ul`mj9[ "wF dxڜ,^eMTڎH7AM=8׆`y^BB S{#c-W)Ufj͘X-' ]lwh1Ba.E] jE8G2NYTmY۸2qţf)he:j50ZmGGc\;%UË/8 lT*Ybd%Szˆ-:,R!qPY$ Fꠦ[v=pKvW]acKڨm=TG!K&XuD-U @%DY&CDV:Xu2=j8cܥwq~=N[=7a 2Rޕ`?rsBe*eOY{rEE \aEAdJGe#0ƨv-bio|a>7}Hk[c%1x3 08S1Qb^F`vU+=r&~k8N@" GHa,e,Kpv^O.Q҂CԠ(TR;wMt*VUi"iH9MBeh fщե3*{}.)>,z Hak`M>KVcIB-41FIIHC5oU*Ⱥc.5&>صF?TcU ."d]{)H )wl4Rӥ1C_jCJG ?l/q(`cHޘhG ֖q]ƳISƧ yގ)si{ve0zE~] 7a_o姿5}Q;W[M_MZ+ :o?Dfce,إQhύ"ؾp?zna&?fxWNoO/.ګUr/c- ] :[ {p̫_>>]sɊj?=X|ιp9/b?{FA3a@w;`&e6OK^d9"[Rw۱lYE#cb~U,> K ֠{gq 9'.d˻g/z#t3])9m_V&c;vQa۶9O`}uNV|$UAjTr8E]tGVa\2DF2)v4ofRr4h%24/2 |ۿoPfiΆk?UVM4]v1y:/)Û(J=1ƋƉ=^>Jv>27y#XSNt! \MRm~S7qy|eQk#0P^cw*e'Ɂ*nsDe;1 (~RDd6l\:ͮTBFZZzQx!Kra.o.!Љ ̌A3_̳5J U2  FE(bI!|{eɀ ,#Hg7Fmr>D0PO=o(֕%}_6 Ds]LC4).G_3'׏9R zfrh~te]GC{1Me# \%Z YͽH 8KAStŏ_R]P怠SMr.f`"D p iΘ-\hr}&?dxmF!>pΗQdWW, ‹=Z]EZ3Nn8Dijy4zbtHS&OV"ESA0c Vjx}p`QM&Z e*XH,Dv<.ƚA`*|>>88 i ,gQ}څԡ}Ӧ٭ /"RekUXC+1E9É~~׹)z"yCk~FY;)a!ku.arUgZYvX;z2"FH|/D٥k)ЇT)yNGg=86Ԛ#v h0HcÍ|靥Rz ){R twɮN8)Q _4/ݼ,/s4!| c ň{̑"g$ >Z 㓧<(zHF $w`In\PJ;K.yS4 &55s܇(01glA8-F-=yíCKdiA~)؟ceJJj]JM9ʺcN'ǻQNM#Ψ*bFj"e'n-3 n i+v3"j)1`PW"DBM!5wU$q."yá$,2.[t Vb,2@ؒs$Zg%HɞuW3;# I0O"!Ogt!&śǰ,Fw4Es{8۔.21G٣]b&duL q/.XU= fL 8FZ&@'߉ }Hs^ 3P?tyD E#}`f#-m)LtWom8 vt9k(S0ˢIv[B8yT!ʻu /?RQD*wbI)1N&""Ep\/zLBejd)<,E)}kԡA1k5/$u| ID%Fu9v bp}QD6kc} I-ԭ=tɼx7q\Ǡ`Ȼ i(P_qt4n1&hʤDB c{ooS6 #KH~E'NUk,TlcM)m Ns\|R X:n]?Ynh>30W\>_. `C2^.X9{{ZpO"x#LgF)1}T0[=G2 l Czx95"w Wt9ÐI@0 i%`mdEpX*l,ŝ>. 
A6'DY+F #)1@OE"e765T$iΒR0BUsHxW%RixQ!i8rVJu$@JFm%w= ut0cA;ApWled,בgs&&s#ȅ5 h 8+ \}n]8ek{͊Z@0R.yU5*B@0`g0A" yJg;vv;lR`!SF]cb-.KLvxf; Y0^ p" F,`"q4"oW*کӝ{YB(81- ~ I'S,tQV~׆ mE {3x wLhco~w=i|?]7mi/_ڥAFzuy]$,c~jxy70gYԿ{ޫj֫' W؃gW˗yzĆѕ{z 7OG8 h\6otE c eɇ.7M>e7;hG^sxӛ9t@p_ƾf_7U[1aiAK }NJ1I:oK \4aI+tu?Uzs L<}R5qtId~K!?*n5YX%lɋ-1zl V"gڔvדԆ+5wkX"J_<"i1PJiC0h,#HHi?\ !oP<4 YՐ3d쟭 .Aggw6G^jJ0Jl.r1aacJ1ָl/nlE8Ɓ/y-ziCvŃjf Gi, !mSo?*%hd7s;YTwހ<f7Yn9ӿw/)GicowYXWʐoAm<ӎFn{U^l& \='˛a{=],[^`G΍,dwgz|I"7-ڭ#;L7MhGyRo?Ά@M(8W7q[ŠRxouGe,{~Ǧ@n!*` 1pB1Ҝ) ZgdyAQI3uPjeDaruR1T1c 6 !FMQ@JMpcy\r2U8eᔯS2q㾃(1Dh3CTSbaI;hsK"VË1L%$(JQT" t5O./grub=ݜn$v7"?cfAd]L^S-tY(`dE; 4Sk2S0_Oqj|ի]4Y fSWX4 ӹ=H3RX'ɻ89|@kq6bfiT((OW2uVXmdB*4ݏggg4WXW"\?_㣟ˆ/4\mTD-?`ٶn0~C|ԎL̬t /]\̲)X)AgTߋjGξGDz k)0PAI[[(U 1;x^M=wykkW2'BY FSn[z.y̾| S"&=9gGS{Axg995oG kdxRKNn+`y 2ygăP>its^ 9 bF5JFJZ  "P\s^ V9Z)o?=?RM%b̊%iCM/L`][[I{%N&y?Zx LZ)8uvƅ DSxgB6 !zmgx8^ڻ7kEe[emtr*@VKdӤ1$ke!H$&LISe:Z/ee׾'':ӦDD5T"5)$v򱋒\@&EГ TQ wP׊uPڟP~i?Եz>@Es<I[l| I,2/b]eH}m}+[0hUױu:q[s.;ۤ43 xڮ?'xB;롳(3-jz'Gъ AdPX0TFh n]Ѹj'VEYrOitb;"ZVYZ0RBhwo_.Ɩyfz[sΩ~6 S=~]>C.d'8 +veD(R9q b rZf5(Z&KDn1Sr[ CνWOǡ~'CMObDl@bJ}izkYzFnp3:?c -FF>pf橄<%@?Y2|,&W.K\e*Ǔ(^ H.PdMF c*^[mٷ.=¼E]~@rǵ:Vq}Ne; ojT[!| Ŧbl.l$5O>'>qj1*\ -Qr<aO-PP-KxzvKwB8'T¹^BŐm,`8Cdfq\9B% 5\YiVI!Y.\p"E9'-;Z,OƇ|͒JkיFu,il}gl o&Kb WD/ :[LA˶Hbko]%UD!^bt{e`5tZOLԅfIx>_Ըn]B몢t}Vzt5&-[<3a(釟7_j?-~ۇpi`۫x8kOW?_]K<*4W<3P:5-Cp?WIQW˜pS x)]0Zd~s \pDſc4ݧa-]?ȽU!tl1N c]{&ZP.z?m䝊Czvɩן~RP2|VaCnQD@Ώ >a}[b> 㝽iQFPze|RlH%P] e]ͪWƚđ- h0lwk9ǐFMˏ,!-G ')O°E1cV11!:v7*I#-AÚDPƛ!ӷ7t!d MlLM}ۓtO_21͏ثllu71w O)Ѷ JH§kͯuZ0YfZI\CSmM|/;t7J6lF Ch Wm6=+͡p0`Z\ͲbM{%Wxe I~s~|" ~9uxW|T̖Ci3M+T>lxR֥5XS4 i (bMASY遮\?y Efi;;)+)s]ii;6-WijQRLKs:1~>&uV莬䃶R=_5ycz 6X,N]hrg|`Pcq15Y,-륽9$oI}Qrlm$ {3@cJ6G|tex{ӄ!hVru_צ0JמZ튨E`hlXM|ik|z i 'luY<+<ɼӞF؉ t=q(@м9,h)ޔb5uG[r]>#|ؗOݴiT ljNt6Mgkr0Z(Fh1eJl0)k(hw54 "%G/++ylEQ'V4mBUv(!֠V> 4U,+lJJ9Ѓ".MɃUJHլjk\\hUZrNI`+ f!`b N^Mk`>XQ'If/sBd^^g۸NcP]_dX-oF=N'k[Ia'$# ědV"Va]o\rWB?hhÇ-{q~q| #%򮲻c_jwhΑdFd)J>|b+ $5*tmz1A@P V% WRj#yCvŞl"~ȱC/fώQlII'LtRBF$ 5bO.IжQ/ȥ11>K3ZR^E})qTm}+F+adJ>=/fպJ*wZ;& ܖ,ŽbxiTu\zEqN>aV_q{q/k#DFi@1 4 )US %>mSnc? csh'H xG0fdNzCԌDPޱ0^]07 E.SzAY9.lGytm6V{95=1Joņ+,DI QDMQԒy~jL0r*Vv yZ TiM_2~kZQo pԻ(YJ $Џʌz)[%TZka2vTѩT} f45݇XRJNRL4>*dj%}bVS?R֌{?{[VSrVmg!ۢԏn?MO eLу$V1hmE\q!Aݣ)GR'Wx[2EY)f;!`4c֑XmUyv^jUrXD7VHyLCvnNn'7-E0f`/1IU7*AzzŔL.E(*bIl" q&kAC93=֢R_ZayrA$\$ E i#)wC;$5%6RdzՎaV] db/9Gy{0h$&8U ){e:ةx6g'G=.NL|hʷ9j5N^=s\׹z<Ѱ dlתkͼH6_bfJ3-cdUדxqU\g9\trVj֕Cr.V_ NxvʾG~ᓏq[IC7tW8]tw^4@YZH;u~}ڏ6qyYZ;yXlv2lǓ:f3/o~|~%nKvsG}s8In |5FM^Ng;"kNgWu,wV/$*H|*(]jj0g'ʪbZGޫP#`e33~FZӨddX(O| |K5fV==]~}OI"]U5!IYV2a WKڑ);}4&b(F9le&8aN˞׏}SQJn OA;f=]<^9u\8f0x"+x ø HV"՜+\A( cptv\5ζݙ4-riݸe%|_بR (4e) (Pi.sY>XC#[a/[bje,j"1Ȫtl13R+IL$IJo%V#yp )c-&}%`L&)胩.k2b1-_dž[9]lIO4Ue 1j\P4[v|@ҧjP]+=DobHX#uIvdPRʄ5/0jBfGx )I^*k /%őx<BؽL[WU &ExVw_p>)qNGHA r)RYƻTRP r+J5gBG!/ld] H^fϥ7DI +n0P V$)&P qD =6v/xTÔ'ON!L?l2tqgʡTfE}І{] uY7T Dl}Ip:f˽@tBz 9dH#&'J8`#-peШH'ၸt:4q%+Ov[L,Y#Yt/sR e#Ank\Jk-HIQǙS( ?S{EKZh7WBWB9N㿕'9xFzT4_? L6ձ}wc H0(Ɵ~&<]@e.(ڿj&ǯ~x_ڂZֻ6m?=T"/ޑvpMZb6fq{7.*7~u!ePy Y N`AJ{T1?^|"I̢ThbߊZ}Q}L&Wk3y.jviky7nw͡ :]fG홟r- -&igv1UB߿ hrx: PG1N{: AÃߗSsaz5Ӣս|-ւ&!dIpJIq&)݁]}eS>pc",YʼR Mi Ak ^D.m]c%r=p!m˹'"H'M G%KƈGCU cp8Ҡ0NbpFhUA1E]$9T/i5C=*O8t1,_Šbv./;S?6m:S'j"z[8pRlN`kO"!Jj"e@ˡ{ʒ1?P.iE(ϩ`{:7h0䶫 wTAZwΡܰqѤI: \RrqޥuF GM瘤)+k{J>$'BTpeZD#v|Ud0nP ThLr3;+R"i\  eϗ(Zg]#q,FR.p(a4`W^@^6;b/4i9F"Si5WD>T5)zLk&{yyMS&C}8qcB$mK#9}I~IIc楐"I8} N@IBzr+ Yg:$r M>]q\/ZD(*Tg_N;$I3 %Aκqp% 6\tV;D^ $VV~i x驑 ϽJY9UJs,0f8R!sjbxe#@ήg =;}s/Q 5V;h? 
f @EƟGJ'Yo6s$į ́ZgR%e`Ԣ(s^`-<];C#K ^ <#1Fs:II;AX`^43AĨDBYq)X*$(gOG#5\K#;;;1M d:zY gs$$8"M.Q)OДx퉓IFrt[Nꞣ>AzNu)͒B)KNxof^`S1Dᨷ! !цZ!$IZR%JFAv4JUhafҜsAds:h )FlnjX#\ψ$qeu%W^q! 9,)y@hR!?E僤n8h=]sP}et](PQ` 4w"V'Q%S- GR0&}Ool3=xl)d%\t FH`h$B{J2h@Î׸p`A:Mh@Ó$!14ArPuA@h"XBr dW Ld_0z>.O]8Ut]6Nƣzz6b:¼O%"1Zp8#TUvIg҇ alYyATY q,3dur>8ԌXEĨ>4 罍IԊrL !]7DH8aL[.)LZg5=uE-xNLn٨kv4E4}Koԝ]exX˛_ClJNTHU+"pn VR.1 <ȨįR&.MsqP XB $[J2bCAsM(~VzCty9ˍ抳qZ ]&E7|Z'M-_Z{5},3ЈAok}67ga}{wzPyTu@lŇ/D1]p`*hn )wCF(vRTjުmELp08oT?PϻyԎdLޏ(Pض?*bJ?&[Wj no)ecRRmHIa5I#a짮t>ьs۲>AmQ:4NmJz|QnwTh^̊vOj7r>ߨwۙWڎtt|6항},`Es}ٻˮYj/ŔzvzQtۃYOx̍@'ЉђYJa*ą|P:gښq_Qu1o @Wmv_RSbYH'ɯ_[-;zOb[E]i%l䰝-ڼjzf{|ڪ;zw'w#o6Q-z}kPୄġ%9 $lN"h.)Q.a"-q }tg"F~n._ kFa97V]sB&_ƩqhrMWMCЫki?h f!َNgkКS(*XSb.Rj/<gTY+ֱ9YS[S^FҴy V5)<HG+кjbVXb.5AjJc 9)T'6Ң$9;|M 9煛K<Gm.9dr+og.=ْdS@Mēg67)]d}Q("05v[ Ue Gj٩jy޾4ZNN_xLS#MG[gVx>}-Y̮sBH*u$C1ˉS#x-1 !O,tl>׭ 08S^^st{?.Q8eCRo?Ah肱^554}:͎ 8ی,r{ )KÒrkR58I)yd =Rȕ֙rU]_h3Y^iEH޼% !ym e_6:]+"F9xFoNe\55Kl4yv$ZB޲PK(m)m.Qe5|[Fa@z%yO\gBm^=|e+F >-aD%}60 .U3sa=uor6уNFN .yך]j:yyo2GjFjWc& Y'+@9ςI3 ^ӻ{c ||2"'F 1ˆB1"3Nɂq);%Ы8X],O r U(;lλrXƶ`J6C *Ix/zb"6umxzA+y &ۨ{P_Z NihvqΞlSkj.Rm-Sn/gƛnm{,B=D="Q/:KX_ή x3OKd7x[zN5֕j,A enldV%Vɢ*H*zpj|9 E'Ml5)kz,ROnߟN>s01o[7NG[5^Xo$}wma 17&UE'rqpFc-ۤ2uɅG'T>H*${[l $bԦG$ X=.\O?\ɒZյgv|gl/SBWnyeZvK }9Է߯1A94rt`zptjt\jpJ[pЖT9jq]#E?K }BM\ 挂QrC>vBQ|I+نڡNm-9O5vMz=Y`P;Ra_uɫm9'er6Ŵy`jΝ*$V\sJ;O.1Eb*Bq.Z5&j!JPqnjXT=4@@=W;95}$)}GԠdMg[ 9ÀN YCK4n]+ogQ qq(_nZ?ݍ>OGVMdhV05D?D_ ë;.)jS|DV@uRZ>_ G,a>R1KԱZ6ނ_KK^WshX[$yEЧc6){U8[uB;#.85__K4L<)K=.^ ޡg7׊}F% LFAR@ͩ>#]uTC]!Ѓd]^+9 N@z1}>7춅^0lM~q73zj%Rz'd~Oz&PΐНET&)qêp,Kvs߳4cuVTYSٳ0gHcH( '*98EZ@TZYվRsFK̠tXp4ԸEἱX'=T.6u;6eSdSGL#ҳnP9lƺ@DN7 Vcs&vF Q%5 -sGM[|=9t{~4xF{W>vC[݂T6ukfƒkqumU=-_̫˫PsF zpreh2L~v?|?0ڤ%jA}ӳ+59nުـzAl])ۨFW-Ooɟ^b8aNW+!Ah> փp45Y4D Ke\.nw@kF _xoŒ K giPc\c蔋1$R[:+:r K%gBu`>&-DyKDYmNDIL>D*%qe$PB&zMαH%օ!CˁrGYi)܄{:2'K|e6נ/'eaϥP{iח78ݦfa[b _ObR&lFyTmr4V*|1DM^K6@ &- eI,B)4fS!pL xncG?-:.Wx<-dKD ەUfÊxe`<zݐGkܑȤtF.1_6f~[/{Tq?֭?_oJZk|??~ ؎rvXw$G_tMV>{?>u~7A/qp5 JA6w/_])ٲ'WwkŽ+~\o;9SĚ*x==O޻B~e ,E8u2WXͿ@~M sgwmmK>U_]dsyKLREV/3RHRTl `KpfjUuUUF\$fX[Yqv?>a_֏ِ7T 5^eíҊjY)$wm(=@6֯-Q\WzP5D/XG0+H^? 8Qs&%MA@mwDʌ:Ji gJJ^xrL\P V2xeRHV,m)PO3C|%d}䞿i.4#@RU~,'5ElpM["ˊdAԡ%8r&ݍfMڵݽŶ, ƃWO LJ1 ˥⿊mjVJ*qW~x?ƿ֘h-]jyٳs³y~^jmZ\g)/#82ZhիY̛J)?4Zyۢ:(#xq YC~55Zj) !^8\ ̯wsjDC!{I,tECZ驽kmgupv$tGٙ;!hJˏ7xHq*Jϕ$ؿ[s $Z\4qn=\y 8IǤm^\$oLTD0P|,:<#KdSVM 2Z?")eRV./sM(J a%yw缎@^5}Fa ,5>%H';OX#]udq9:i&Oh1r bAFyad!4&쮙\\R+c$F.~`1*.NkM !GRE" G2 .xͲN<b12O$`,S}nX+Rg+%it< sRy0'U'֖0"'Jc1#pU)9-X˫@2^*Qja++M7`eڽKdONxYغD =i&Sפ" ,N{AꋧeTPL |̫pFUEɻch6w==j޼C_W`k`ޢl,{UyV?0M RToٺ۞lLgYOG?̝טA0rm̌H^K&!\Tx/M)s*% >,VPncԢCA'1-3q!XQi%LZ*0R'v2{N2a-gഄXJm ':ػYEvTkD1D&Q뵂.L:MA"RpAPKF{ D`,H˪ҁͧ/sJuCId08$!leV2)sCYVQu30hZvCfLm[ΆϽ|(1!f$OBhI]Ba9 | aN1j0B$LԦx49YZ(!g9+W; %+{UqV-g bZq ZЫJZT6U;IU˞5 j8)'%9 cJTB0,r|٪K>R˝B,s䦥$exJ^P檸l-{#PKD21|^kb8m"׶ӢkXr*u4:>ˤ)*fcI ;olYWY_E@|꩟=. YR! 
F TCϠy Ur9/vSiTV%Na:$a*aŠ'{ K]Pz D`K/d"Iy Uڪh \'+k09e5%Hh\ɚ55Wk-zFk?:eZGh#ooraǛ͛7!NqxkW>_Gn&8ݕm?NUj=+'EV'=k:mo,rzk^/t9}7~]뻺-; 5v5*2e%Bau})oW~(voilbt7YG7Mkp9NA~퓬Î}[ u8vIVrV#K}jk|q^MfeiϚHLpMn+JPʁ_uIY KFV VAZ]I,Q2-=O/jK'(quByLNl77N ilO,Ʃ-R6}#WoKn ~nY6]gDgceŮ\\w}lRktɋ,sO+Vl㏵T;,i-zn[q滋=OxO L+0p3x9Z{ӂze:y^m٠\Dn^m\s|*K\~ zWj)./`ivrtѿmOQH$t9^(!ekh)-G讐–f9jW@o;.)W)ƸdfzLr@I L.2zǠJe88XeF[4UYʆEjefX򔸡smH%)ml~3hocV]MD-CY_w$]2NKN I@GY0` "oχϽz~ R`rRN垼J5;d}Y揎@א9P\lvJk񿃔A蚃ȁ$yri܆ͮNIqAeA$ |l0*f6G)u Bk$S \{ǣ1J¨lV~xOSdwMa0"Ia/\Y2|z[sulTWLKKy1WuɿLRx7#^6Nmq;u[p{Xz _F6Ybx8\Tfʽk` l~YsǟWh,esp>8I?Ah,dY5hra ^癣'\Tt#~sH|V,6e;IqjyPJ}oM6:&ƣ4]㝘?)= ү{TtqpQ=_Y[a5S>DSi.czq&N4\OH㉫gc sza14s4Qk Es% }EA |?|Nm+.?(f X-OÛf tN_4C x$%Ve7̭94 ~Vh\|4GJiY*S?YQ6OsR'r /sӤu^n:=XNHu[M%еFRBzxv>ޙe0—2҉Ti\>;kNa3Ӄf<fdA ~j|Nn0Ƣ:V{JߪzU/ӪG_^WӒ:8,bҨ +^>q٠^kuJKwccQvzh}HK|R/mx g_S갆n;󓪙Za\. |߅c}7(kVP_08z[dz Gg7A+Te-lL|Ȝ̒)FZpRV)GfH7Q=scDJ4q< pSW dDPgOm.ZQ0mTHsTqmk f$|6Cn; )v r-;E"jgVHD0gY&Y,lYq<*K!'c]$=sdF;kΆ І!>e^^>s)AݐYZ2J=IRF-=CMD1j( Yi5p5l2x~Ǩ]Ԅ `4,w%5[`jb{d!G@X !*1s2rM4M^Ⱦ8= IBzrm*'#ODF}ѧaL6>f~]jI.;=}2Qp"T>Fvb涞N屙ōdàY)OQ$!|A0ZkKe Y+ ,x`"w\V#ѻ)i18&)!hKK”!jOHJ>;&;^p9w\^d;T3=fEpij's4)p˂ ̫R>̨ڛ[^QO˦8%`ݣddEKdh:I mDr@%'*NIF`x4iϙLHj ǀZf˄7BDL%؏Ysn֐&!H H"{G8Z2|$B##'<%GP7M>];JwJDgxP3Q\UeڵՔvg+* Ӭ= Z)Tćv]Ʃ4H jgdn@\F<1/ׁٶgwGk]`Nx,!PـP%-Q&(UJ1+R<ɖA|gOVᑒ-^aM2eH"v2Whr/bjj&UqJcgsWGڴn咣W?ґ3uE)x:\n`XJ]X q`'w n\Wߟ~]kzΕvtK1vxFi2@-+O!!I6ך]GoÖnz3StQthl@rҹu3tvuV?7sGt3vWuXj}hO'ǃ󁚟a|2—: I)W>X]''O4^cw-hhgbO鴶7 )zkH5FW[Ij3 {{/>g^vz7)+B aUP&ZK.0kf IQՒN-Tsce]6h6vɪ\.(KN8Yc8@o0',XMf\RIKHY! E?|GrJ>HMߤc|Sכ[3 6f0m*5ubyfZ7lh8LV1tˆY0䉻eԷ |U7vYF6RۿξYV6X)&Ac -S6-9ØB(PJ"g& eqy }+`ixi^vX:&7)6a#mT8tsYD [*)t>toE5-a6y* TuAvE5TZfRڛ*z2!ӚkT֬ ܻ`M8ʉ5lC\)+ѭaPf Y`V1jzBxC)4EzFBtPiNBעBAecj?6Ti*DU L`<O#B #C(:`֠3=!j8#'5RjPX#¿- IJ!\Gt PQB!_#3.#jsܛ{ C]y4/ Fkjt"Q!RY!jJ7G-V4wYQ[CS r$h urOB(f(߄I 3 jڡ-q)Z(0u(KUCZɽ>!g2N4+&.yq'k/f&R`D *1 Ȍ tRN!#rAL7P˦k @- X!lhE,*Q' i:!QV"cUpB#Ÿ7vۺrXkײ:'(Lk"f7Dji1zRAamv" ʖ3thLPF5ZDecQǢ+2)qo1*Da\B2ZeYdZOQ@o~vpS3`~RCrrD{\34zr8AMUWhQ整ҽZ66PmNJ. I.HXN5^nWwW`@C!=^t P"r1F ^=)TĩL}؈V̿$Tj挬 16ceX Y%S=kQJwG $,~L3QֲlSSl肄2 %(".Ȫur aĥ g1N] .\3[, a*)YJLG]D#Dme#nBA5.kd B 5k-5Jtwx^AdP;w/q-d)LWHZ,1V ƻ1VKLC4*T:_ ̕d,\QB 7#]DG7r"*c06Pv+͞ A1/\>W*t#VpTH(T0&F)jJ(ZGT]"Ÿl ;4tY(#P#&ۚ 8̺^2ũ@,j PY }6<=va@5@=-"ITrVZٚHnˡ+P`Y3k/Qu9#*dU}\ E xĔK ~B%K)vzi>m~G:R$@Z(muI5";#6'H2eIϩŐd[|׺m8o'S}/vv%~ <-oyKl|ދ)61lsֆOGog7H[L3:L3:L3:L3:L3:L3:L3:L3:L3:L3:L3:L3:/aNM'v{8,2׹mh_;+ -pX7o: 3n?z釓/!S? 
ha4jDO'Ӛh['p{o=mbmh1A-7 }{=0\U*SHF!grF!grF!grF!grF!grF!grF!grF!grF!grF!grF!grF!g@ȑS&AAȽsl BJW{r1INן82CT`=U# q̌?7每A7؉?=5WCRDPUbpfJ9׬DE): u 5i"/l L| )(Q'pPE_}t {6 !y0(fv0..FOu!PW>uBAs ECyMNΝEn~m A3AygPAygPAygPAygPAygPAygPAygPAygPAygPAygPAygPAygPEBN!ȟvံ_O g8KkxE=A'g!+ڋ|yoLbZ؜]~v'HYLszk'=6fS 6A=hӕo&=5lNY=M*r œ6e>} !jF7MiQC9g׏r^wgIIٳ˞3CƝqӋlS[|Bo[r[>\xև 2AiP2ǣ!`O!WPZA?Ew~oPS ZxgoNO;{?y*AߖR8Ys)ٕ}o;X5!9t569bH앶eij2֎T09RZ :^Ҥ:`>{ߎZ1^sO:o>@)(R:4 ј!ų9ǹuse9+}˻3߁cz񗃲;(OO7QR]6/oٝ9`I~sY5,n5M-ft}Wxo8+}.+/P#V(Vut3.u^܁[ͽhfVZ|ƬꛝFR\eבӪ$ڍJa 9 =u|ZܚL]Υ2=;Zk{Tjeb]IE&Dr8_JlyvbWgip ~}y?7ACɂ MTgBc6PmtГ_U*L4,$DJݽXtS Jp]v锱QGa4޻iH.N6Pޝ~|-.<0B&6RL^'cɽF7"-#"w-?N ?@/⡑n$?|;Ç^y`p.C+uab%[ahvO6c/boIɉrE b`Rx[uҲ˂U$ ) !a׭/ooݴ_.YEHdYc([2I)!i#VTcK.-ٗ&nQ|ssoBϷh_v'~kɞ]`cLr"]bx>'ITaUsEV8p=FrMOѣǂv|oFY3ǞRow] ~'^_I`HI7Z-EiDJF0q]uEJFhh;=H^g6XjSK3z#3RŞX*g3 qyu3m\:u*C /(E&bnFK ܜ2g+z5nk?7V}+[}ѾbVUAUV&ـ2m2H#%Odu(!N)[M)?*K}Z]+svg^Zݗ7,y[cFrD7o ¹H3qy~l:_eֆ1* p [lBzL3T`~| 5\s$qG5wi_紷::勋FCUehkt64bU*"G-IImq}q (U%V*d2e]AEE8e Qg)l}UJ9 0In2٥llͮb1bee h0hky)3\Q1:́bv}TCTneٍmWM0!_NE`UƵbڝ张߼U%u%YkUu7c;=HP[::S=`WGh]*5Ls{r.b[ߝe u^ij*Ԏg(c{JN|HLRUF,^#.5Ow[$㍫p(VodΡJ%IX|)(sʷӣ6PERUakɶ LI)&KNI wm'ԶA=_"S&o]WLfeC.`PV%iޥ̊fZbpʨl7J@t*$h1d]\IhA6>.1m:˥-6[׆6k#(!Vkjchҳ50)ZǮcua ȗOF;02l,Y3L08׀zMϐi]nYjeI}<6~?- bjocIVTt0K޳Bvߝ5T ;F'Ơ% ufLC@^>l I_`鶪:@yX 5.m|]JG'IV=85_\jer֦z!'\j`]LЯ>{Ie;~/ emdP77voʄ?ɷ?;~H;&*EĐim~4#<LٟS^?~1am&_^ίO 닫Fp%03>,bZs2|_?|h,DCaJʤ>֏,S?̣kW 2㟘 χJϛUe&Rrъ_Gvdxa#:*#|WѦt'sTVPL2KG1٤T9RA''cH~!V[LDP e<֧Dގ~2JAGdз7Ȗ9d}fVw!lLܽ0Pۯ=(]C33@UQP+#7q:ٚ k>_5'~Qw9?6fNM|pc]:+0'L2mH叹x8O笞f#l #"o/<(Mȡ֓j[FbDVcmhz,w#Kk4Нz̪:TͳFZQjHڛ{,Z5:џޓ?4ѻ˫}o HWgJ]x"f1irpt\O~'~h3m:@+jymqYl*T틉ŵ;Ir4@Χ RT]9{SC-Fk=n2ZX;p=d5KW͡(Kqچ Jb{˞e**9ٚuE,\,n2,j6߷\/ϖZ*g>7{boj wSj'xR1Ktz6| fA5iIkAdL5;UŏckXU'f|a @ͦ.Ij$^H.#Z';ŷO=1Պ?,ktVg%H*T%da\xcnxl|*1圬_7aW8TFjs<#tyXԔ d5d%mM)ҧXrf*#5{ގ`0`ou~6(;5h󍷗Ѧɦj+yfIdCd9^ I+3e~ɴ>ME c67BsW&,))GAaMf_./¨/n"}A0٧+? ;wndFܽ:oPgobo#l7WC^֤N': +2;Iӳ|qO?/n:l3-L=ky>elG!PHև T"hTj q纻x?OV }{`OVR6!. 
2RT:CyӢ0nW`bf R1^C,Bv$^ j_=ƤImVLO1#z6|TlrwNc쑰QjLDQ P DL1s>$9Bq@5y-\A,QhDWlAyE$֣YEhmSʮJJԨ[1%J>u!8ږ{wb5+LAy?0)] r},rp2Fd>adVj Zy=d9Ry$ a:\z@NUґys,)֌P Fh%hφ-ZCM#n+Rv|ak)cO W_yO2 TGqxXB` cŭu!pK&iM!,EWm˃!ȬlkR˔ vM;rǶ!5:Cu܏rx dOR!5qWe"Y3eono{Tfo<(kTxKJRii: v}+؟J$MueUAzNh_(YymA>-wG2Еڬ`D0ΰ粋^+6)bVp# D4!TX"//pXtJ*E- : bJ2SA򺦜 xfHF4˖Qúb#.M({bou)wO߻s9z*_{@ OEdn@(b4qxzy=ymoH)P^iJ0 s\QGĨvRrK GHپz; ǖiipf+z7),|->K!g7WL*%quJYVJI ktNURRQ*|yq60l?u#I-&֞ FCPJEmٶ *1j5m1< '1v$Vrwj]'>%l7B.WrD `"jgr!ca >9RNyO4pqˏh0Ge(rķe- 8޹# ye[.;K08g d=qdEs_Ŗ,ۉe+i:3VK+^bAϔêo>IMS%5QOD=YRc uX[R_H;U 6*t%Qj*MSBME='_*KzUo/mTݓ *8ܧwirH!@uITzػUbh9:ZEvb1q1eAGx>ԡvSS*eku2>H)T?$y|8)wh0d0]/B~9c"l,,J]vJHNFiMqrt}3<:9kC{taGˈ{B(E=A] ׅ'z ބ)BRzR².B+w5f lle+t(ªGSP|IW]CXOBU bU.2{:F]j!Tv l}}Qd ¦6D8VFLotVQ LD`Z$ `p$,yI c&)G<$%vɰTOB4RUC8)lhL6:r(\P:B>ɘu*)PWFtnvvA[, V-~bg&P6%Fͱ&@wޠjgK2SYf,J)g,w>W$-lٯF&r+Mٔ.f_Ol]XV֯trr,ti9'I'l/Ow<[aAD糨xaKM"F" Ys9=}wN9{zW6Rg+[t('fɱNn`Z,Xr3l[m&]p=Lf Oe0gt5YAM&wָexݝ_]!jt]%ŇicWy;7ջw%5ޕE#+Ww~rKCUvE^tac*%,.rc{^?9vljt^Asf  )3F"Jjr)*dюhLZ}$rwpzW r `i1&GY;=௓̛" *aԦTq\ ;wGʎfvg=N?ᖧg/ >|&fbВ-۶dJ#ppܖ 3tGBRAf%mIbr, M[Z⒊de%őBwAoJc|݂PAIwOIhqvvZ㴬ag.6%\~b"JE_9]2gb[#U|$U\C@a+um0Tjɻ-v rA"CJ>j(į~@o-u;e2 Q}eLe!9Kg<䲲E)[X($0j[ʸDȂ/}sIvm;drvh%jɹ&Al]&cҐ:ˤLօʠ; `w\*M QLڂE\T2(CuJj sn<~K-Ü_ 㷇_tOT;`_";lk2#rpEX6(VaVRa}vۧ bc:* ;A%NtUı攈(Y6Ζ\ms{ f6}Th)8Sr&f Nge!n>T3:%w *L;K<.LڵIۣO\pn'rۭoiNb!c ![.:CvLu#XGk*U1טMuԩH -H+}\wtΘ59+r$(":e1}"5A;(c5q<ҥ<;+m/Yh/$~C m*}^˟*2Dd#xJbȒ|(ơ ZSm*2~3K>FU|I9DZ/EX1Tw(-ssu˻gVf`DZR?XO]%紆_uMs_!ke~qVjc݂[(G'bON^9X4^09%.FlNFD(DhgIKd?n8 X2̴Z[2hQ !TCbŠd.iv&ښ !}lKlkFdts%ku*ANZ(0%PrA9`JeY!i}&GFR7 1I8E0"Lز {΍P'(jޏUfa@u >׹bKM.Zez^%+ҦH6A6]C#ٌ0֢Fd0$ @V;L*1kXesX FD60}E~Mw'0nR:=A#|4O;xvZnj⽮0U18'*+lQ6W A5+0K^;QCZ#F&2JRP IP[k <.]@>lb؝e.ǣzA <߹ʌ'tv<$8y!Xm "| &_ A$+pU * 4bődqZ oi"RHcVE)K2zSj(QFtI.ƀ#dtWΎ5V&f>gbO\ ){6 ()!IrZVhC`T>{4+Kc|kRa j¦?Vu@,WL f#ƖhC.B )jGA(.BL0,=QG71} hO `E.=v.5^_7 C6()+/lJ\H%@ZvF9G ϲ)[%-u&B:WhMt%U-EDr0r`FI'^;ƼߟzHz'T&2|@!U];]( !-ji[(Wg)UJR#] :9I?TCِ"+QՊ GLScJl*\U!GfCCR1~*dhWCKdON` B;L޶4&'gC8[,gg!' 
'@Z)(FhcV7;3۬ECד-,]A37sDy1kaD\&o־W2g/?X5}dww Hlɭ"]/ߖiW,Uoro;~!X|D b5'o?&l4}ju#k PaϷ_|<~ϗ[ڳz2rTVXB# ϳo/Nov9Hěk[ bӜ.>5$is+kj+_"Dw'qr1-߳oOW4p-v\xuy }u`u|27U1nE=&nzOli:Y=ʟZkqċ[: t}߭^-/u`#{7gkzɬOKoտ'˜.W&([InƜS'4=kE|q5_Ɋ]6|uUIU&8aSRTE`E T+=wVfݟQSuZD^XL|)>nfuonqz~yZb5iskۣwhTD7[n>b;`?۫XQ |Fؐ !H8z7h|Y>"E.0&@`a><;qr>d=WƊ>?=%G{xւLT,WgNWbva;GɉwoD}koF7uZ6&D!bp *F ޞ@܍ Y-y|f6[<Դ6sKVa~:@ݸy$oRsvEdQ8{îa"<4>*g=״Eě^bv,O瓻pJ>5aG˜Eƶ[ǵ}=3qRw(3Rjc}[.& glUI[ OxӍӷ6cݝ2AZGn\DV Qb/{W۸ܷ[aȧ .s_&,uĖ<m")Y5-)+6 l.VuSUl~>Zg_\\Ϧ^K 7UECm:Ӈ-fqht~F-~EUp݊\ҥ}QAQ:I7{hd.+G`}`L( ϔ"OC7@7*Qq0% n$Kl[sq+H|AE0gU1i B};+huѶQgE1C[e w͸P%aM/c!eӀÝtTHB&7M lg% |J6͞ 6׳*'h9Յ gH"+fi}fz^IK4VQxU[wU8 8\}uoAyHhⅯG0)_(l@*gIٖ[:i7'?0hTƓM1&^]Y VAEOE`֐g5dqfR:'S *Ÿ0\dM) ٩PW1!guOh;חFJFX9C/mSi6_4uKkw!<J%WFב8td"U06 HwF׊TdCTJD&t4 *b+e)Uy1BP~> k(]>]TwlUc2BE\.ZoC 3CEnR:o_Vb| r 熑6oud uU, ,~}KOGe9x pg_FCjIkPfL ^^ng`H4]fHeU̶fZ* ŷNK X NAԶے1JtGWֻ2$2]F@2S2!ؑ|J ޳>oI]`VZ!/T"\I<(⾰#gjN2ae,G%ngF-{:t;عX"h8 AM@IPX5OcVݖ;)^v/X8[ "!Ta`DexV禦eK.1s!y[HTqө.5]HL N@6*"" tw,dV$(#+y-K,M" ~dItT"~fJ4 Y3y)+8a1[jlb5_ 4^m0Z]K % iQkU^ t+FW/wAsĐPV VE3윤`\fj]S;ks'= ={N&-T#s;-蘍;t$Cde\B%Ucہ;2<4˒EP,TϠy *å4H܋X!X_$6GdqoW\kWS EUgֈcFu-2B.\M!v{u{b$M"C|+Ju:Q5ש=rMyPK6UΣA ̢g):6~x`7e|xU:$i4Tcu<lFJs@r"Äa.ut2*HWC0: $D zXȥh5FMZMX-]67ota)jp!rd)I*ՙY+ư`1X Paм\`ĩX}I\je&>d/em^_ȵE>GqrgcĢ(Ϩ'c^ Sb~ggFONJYm1r{OX"+i,d'4N4Wrwͮv~eydHJsk"ejFmmN.1tqy2Gzn1(P pHF51[|8i U* bJ:jaХe1⌕0ZUkF jMdӌ<7Ε!+'%h3$=/wW@C^ hSIJro\-$I7NTQzaVK Z"59kE3ͺE_$?Wph7I v u~k>?pp4ʏ}|Ksw[E5syTqZdy/U˴~O؊E~'݋S3;~Kq% we8W=gx+AnѼrE{=m8*Ba}}*̇r|(t}oi5Y+vڜMgrX䗝zKi|}A%il,,ŦpXn:MtcM$y$7%ro3A=DӤ"zx ބQI Кi(kYh,5=`~p' S7͛tu1K_(qynΦ Dٴg[,Z_֚49YMlhE{woKnu~g34~ɛ>/7?ZozSO]ikWy&=%ܸxq:EK6B]֑c%~>/CXo}N8i»O~NL!\_]x:Y ZI;wwY#3.1O8eʽ;WrwOE7:k 8@u.ʍEn5dUl P{?49CyL_.H)[ –Y3iچ+XC~YW)=E!0Y=H֍ m- ^2(Id2s`2鴓2{=+M)UbĔ #H);%h eMD3,LgZuqqp qJ}İj+;ĪʻP~z | @FLAƨdB% Y(&e}`J1J !gO//W)iIA!M_hO:Nh_ets`UUjļI#l>lz ˛0=o(-_.emI1x@rPs_]1Nʞzf3\MUTSf3. Dv?XvJnjʆ2<$`Ie\1d Fݷ˵59 * Hdia%+S m٣bBf;A‘MXM,7(jۭ2*0WXbw_JvGj%3Xiew\A Zo-dIja Җ*hF[Jt\S{#a]b P!(%!vX8@S`ޖ)J8Pg/"4佽$okFts$tJ{kUc}Xfr>5WTOȫ:Tb){ݗ 9ClBk+? Q:,&2eON3c9jbsz17ΪQlc ~HNVeAhAGXb%F`׮ m2{Kk~E4pJ)fY.pX^پ&7-eZ{u7(Am:g+1J73ҕ{WvC"i1R} ^n=Ybꥡi݅;@Y;2<)M0MO^ ՃC#=Eب`5,ާ-FT0X\5 ' *D85ECdc6C 1ϝ#p{Rvۜm,ڽ7 }HHHk`1nS<7YZT1Ŧvpp Hҙ ޵6#?KH_H~ v(EoZ3@+JI~-K ,Z|Y_UWG9%2gm{$Av>i~Fbhji {;yWCvZtgOVKcZ%N.R Ɍe$ )kl(l ʸ >]S*:qYLU#9U@!9 HӔ"duYovFoŶg@gК2Ĥ%ʞEhTJlTRE4է\u.R :#0tt!5ZxXp,],wQnIFb͜gߩ/?[SZ [Ug,ؔFKZF8U|8U|8շӭ Rd1(|tPs҈B=+Elbby/n͹#-7ao~kKӃ¤EcIrmƻ`^`c1jr6rɆVj[n´R'2HU~V{ˮ:n+v5r\/k{Z-?[-*ZPEF\VC5kEBձ #iB LC03ГLI:F0k C^qrfIijPϺ3G~櫿[Öfzv-s~n99~BzbvGmL̊ur4"dDAnDŖ2FNpvr<F]:w_Ƣ~Ar{UըJM-ax,ƿ'I_r2yo5ˆiRe(蜠`n/?(~~wL=7-"OE+t=7P&'AZ@6_{:i4C!("Qbm0@#dZ'7Ff:XZ)6 s.4MMGj:E4]ɧpFQ{0OhoۺPQ`մC-nb^5PaR{Ԫ A Bv8=@kj!\#Ib (ɺ)B4NC $BeGclcb42#pȈpk"u_('v1^@VU!8Rr\ Dr] c"%fOGKtĆPu. 1U[ʲ1M卷-;r4’)cWXYlc5YY)jb . 
OU01PI^Y8: "@Y5ΦXmv8Ԯs-?bc(nZ=3b-a eF_NqiJ  )hEQeNG3r+(5 [[])AGlbZѬu hRMָJlfu1ˉ*G~1@|>?咖A@jO i΃&˩v%r\ M bTb/ -g=֗pL{]5zM[csZV=msHFSQW Ue-yJj܊?:-uRB$}J.>Yff-9-6W+&*Xl71?7QoG{j^)UTJcĜTV66XŤA4<8PQ+Bv>gC 5]rbk 1fՐ (+ŰR=>${Cꮜ`b^jf[֣ɯ^Ѻ/scg8aw!Ԣr|\%IbG,{ 7Z_q"oʀy' Y?'kYg\0b+kgsrŚS aCJ1j.jiܠ4kx⪣5B5XےM·0"Yf1Ic1X[D3ka<]sq&DCxrͬ[ coI׎ EJX\@ѲmX(D|)jh $$gflbfu"r^^2o.g24Xs*K#5~^ޗ\'h+D&*  8T\lqLd :|1\\M\>jku}/&ch!߲c1_{Q :ġR/\E}%@.:*#ёUbΌ./&W/*ƛ7 r[l?sqz'fX ,Nwl2G^T+3 vt@nHmFVBhbȍ#Mv5sKl?q^^_M0eFޯ$V6V5䋮d11Ȁ0b|B('U<"%KQo zjEZt36G1D*Tk%!dtIv7+~n!5&=#D$s$0:+Pzep\ElAz{=)>ޢ^ #sH:.GWcnbK-2FZ ,[vMd([v界bs9v-t/0Ě[Jq"d ^xT#^kQ憕x<;#}n@_r>Y햬A݃pr>e1)65=9>ßeee'/.=rsvV=Zc?y=ߥeGWgG]$Rb:j9,O8_c̡Xv"ˋåwQ`f6ڗͮ{WP/'uO0>|\Ac:u&x²-=~RTd~^tڝ<=uU-5=Z똸މl3;ww6f9 a'{rx(a|fwxmux 5$}H)+Q:g|뿥Z [[2@Pln.uR~Y/ ڙiq9*J%և/K:\1\,>|شRdrzZ|w;f,Yn͓[՗fQkM\}2Cnb#]ʪ!m|I@*|2en:;ZozĿ\ ^,݅Ϋ}V, 8vw=-п ~h`qF_klv˺OdOhP7i\[|n絾 M?ߤڣ:PpC`s( 8$iC%B$m6W&ls lYJ2"cZ)ز-Ǹ&9X#T5ȹ52ɌADn[8P s*THՄJXtiOrV ԙ]q3hnyZjŐ"R輡>Rc {KsV~\Ɏ>F<%\)i¤?B/{/7q'\t&YYjRڬlpʃ/-l$ޠzφ _7{Srt t  ))e9 C(cx5 -7QFko'( +z85Xk Pƈ&{DtE(dMƀL5 t!Lz(5ݨ =e=WPZKF}C ֜ =G,%,x/x7V7_2չ9t:X$k&*yb2A(ËtOv4@GvsB -q oKJ]["hzCastOζFj>{ICPql qqxq 0e7/Bzm2MS޵6r+ٿ0 |Y`?ܽ@%Y,AGvcJ<Ŷ,?FQۖuݭn}XuS 8aU7UԅTϖ-cq#TFǍ䓍@b4kt)ke( O2eq1+;Pd;#!i$H}P4yG&|r!ZJD!䒺"mKI887LdZafoKLlzg}g;}Og1>W08%pC0f:W׫/zaO**CNwݑ~JGjg=XH,1 {P(6٨$"dڠ5&q7obz?_*O.Mp3#x&fj L@ϥJ]Cm T8wdRDcTz;/'GO'Gv,~ݦ*s0}N\!5Vf  2tlL/ʢLX~M Q; S4+2EݚɶY(5o|ze B9wfJ-W9̇n?8;}>gwwtJu"w>6;qifNtO&'nҞto;% _ݽ׮L1i ~W߽W^իջsrvt='|>Olى̿+ |)ʭ|{_zo\=m߲{{/Ƿe Q*oBN~G$kGu􈏚 (Sɧ f䬛T"OZB֞cMi28{dTdr"\z~}GƸݬ?X[W^ 6^ZZr%RHr˿f}r%TI9$TJJWoxjuzɌ2mvC FK$Ss7R`[e1:i#.^qčk.sCB겙ɴ4oӺ{ ].aZ ӿQ8fJ5̕BlQ\mR"#?wh)S(lKjYXQ/ ՐքޥZM9;fF}_tIk]LUnV_\9mC*rA(&HCo,O߹]fnk$ةfnX%(U{R?B?lTrK Zp"8nyY|%a!ym3+3 #y,4厏*/e)9*>p8:u"acH13,( QcWf\ ޘa-^5|Pτ%̥%:y Ƚ53b-ᨤ#+86ݕq d'\lϸ㙨ye;7H<\-$B§:@Q7‚Q8Lc7m8\2DMF2{Ym1\HFX>,Z"%rѷDnJ dX~`pG"zlGE2Ԛ\rFz6&6 luҎǚ5#㩸AA h2=;,ۄ!>d@mӍJ%%U*bȈ"Ю3@&r j,cq<985.{TxlU5隹d BȀH> ]#vdxv*Ǫ.xWPi'#hмɅ(ʬ/qcQpX5$GtPs69)LXY6X$i $" ܴ`I֪0X֓ES8j@ͶoͽKG'GgVvhQ n Gmt(|19IB@GO]c!F ^Ǥ61߳ rb=Ca\SgfkPnu]_xlfTgACy?N4wQ,!O5hʽ390S|jz~gPf,aLn~9x|J~~u~Ǽ37Hw!$&UIwi[I~C:.?ņ2~xu b~H Yyi6APW3WP͠z.?WS{Xv/ {J "Jg)*]a&wyfYYCm ? ݧYh/ 7S9-LG%8d"JTixޥ$d 8$dUEl4q5 =RQ:IL,AqL8JL: l }ح,]W;?SJ QÇsؗBǙ?rCwN#(]585H0ƌ(cF1#ʘe33#x&nacd6\fr(I`}J蠢X@H$\%'9&N੒GKvPo?O:+TUk'/ȈQr MѵTLR rX5d0*,%("èƳ;q)2.94 se3[$iL#:>4%6EeU!UH70wŬT4nQ܏nbt(W$%:/M"et@^hy+KZ"0쪯dWu $Fp-`hTHNbS&d(O;Ht桑4XqDR H@ t ۬I}pX[sq}{c1dHiq4I|vM -RJDʥƟR%h.նW崅`޻RNFsA6ȶj`^-zX >amK9w"c+H6|m[db؇a8e;(>l[`#%J*QI5e\䯟 GKU3"I.e$vE6'_w o8p>3H?L.ǩg:<>=y_X|O~o :y:a#EAP+5mH㚅YpKY[V8z]_w?qO?N>2Pݼ[O۠qk;#xRxLP6P_.r>uZyoXW %Wtz6WڌX/%r&:W?:=Ha=^%/=|/ $ȄVSo1V~ _zVCȝ(uJUbFQLFnL6ɾiDI拞@㎚ R.FVE e֠5S1+Zd(:$1 &~)tSMl#Զvq MM-f|_u23T!pD ( "}npc8DmƝةA;i_efKYkysؽƟ}>j*_c\_ڗuT[Ƅt>*ʿ;\5{ CѵDw6l6&Hbj(XmhKjd(1]:W?܂ƻx ޕF#RӮ=l!_^=cXAri]w0TRU:S*L=tKY<`5ʘbYLm' ˬIf,B*YQDrtB1)+\#?:|ge:;i4њK2k77%[?\2e}EiQE?x 3R~9o鴟]L]ЪGuz6OЕc}m,߶aEE&|&u/݇o{m@rkvfbο+GeSW k]R+_S> /^hkä`޾ߌAz >_N?? 
]w W0ǡHcgvۥb׃9_vaN8l:}y_ֻ9K 0-q.E7%\ F g3NNweɕ镽0E4q??jr~wJgv T}1!)RNl~dDʥDI*jCR?5#Sk)^XShsݿ֒w[?[;[ZXonxWOX?t/T (bSGu!Vl)xPX֝w$!l!E+fJ LJ~hD RL/J+zHсXUtLƲX MPA S' ?^pߍA*@JK@s 1czČy_$ ܰ(%[[,Tu֟ij:bYopYQp\&%\ lNtE(RZN($%ib/D(~(Tؑvŋn^=ڿhkjEu OFCUĨQ\mMPZdZ%Oۢ6(D9T5=Cfؠ `(R*53lUJQ8&1pU佚Ă<'i"8P\@RqXc|s*S5P}Q)ZWos wVS,H:.+itaU)[rdùG1ײe_UQ/Y);_>,nUȭtD޳L*T TuΪD _19jc1IIyiQi,T XU^mjZe\T :+S`$Ue2<2pd\1e<+,L\Q9VYdH'ke.(rDP'T4)WQK-C4'29LlIcYL5,u}2WL6"pΘ:nZΰ#N\s}Fk0AamXdL=Ԋ{\v3jKg *&i LЈ.V nT+tKccy'H1IH0Wm mDFγ)Y\K$^p)+~ Toߡ!nYjeCK7?uBdùGͣ;CNk6&GqȲ3{)3S5﹗)"j-Z(:!!3r3ݩ_nvwTBmpT>86p>AEaH@")YBzw =-eN@ `!Fe->h 6S6Fjk"Ti㡅NMekAOfe;ahBQ0͂ea%m K7L6!$^:?Vve?ׁ9|r gb,# +b1(\Qڮ8铍?tVS@)'4Ū$,}HHIjY*5yFb5 K{BMtdm}dd5]ґÚeVkL}ybP|>ABNX`hE Ef%]:|x'g75eOýӓ7՗&B1i5J5:Sl.d|eHY}dm*Y!"\92~ۃt&lEW@0DU$lI7*b@Y;=Ckb䱆W8_ksQh0GgL"45ũ)l Pکb$\3v5^/<>Le:puhG`9ǛyIշ(*J@UNц\%Et)ƄtKBs#-< JȗrƝBNNiN]gG30{Jb秽o^ 2튱ek?y'/$_A+B0Hpd+h1EnnyS|ǩ;_pߒ Pu~[#"[T$1" 2x fAJw91^sKIbj9[ipIVpU:Jf-9 IlVS$j$OFJRgXy ~x/-n^^:^Zң2kxuu ^֩pъX)Xۇ'E)/¸:}O'#ԓ$N~w?Y}e{ǧgM"ƭ j`ݲ;NNFO$dhIZA]Av3 ,+M<7PˍS4Ts⾕7^a]u{z=eL}/]6Z°ɒ Uc DBY] r7߆aDLk?yp= 89J5[(G09jin*lY]k͙%*{{5|q(cM(|W~Zp*~ɧSfAHI@QI[#&j??FM$cldHkmJ% 9Ku.LW%QfZ8,E6&㉸&b{?whdM$ho }]-_HÔCYn|zlM%t%``JtrE~+o' <6d,45\ wK%҂DFJX [4@\8Y Ȳ<ѥꬎ`փ`v+q0v^o/).L^36i&4VIau!K(m<-qtw,99“)g $:~!w@)% cP **xdY;⵲jWw\8\ Y7 Iuб&燌$ %? ms)4El@䜤%hv:d!o$xfJlpLy' PيNuR tf**QU2&~F[+dkIQՑӃ7ݾL@y=@+{|{|#_ōB0,ŎRz|{|{|AJ=AH/b/v==؁PkPSGȽPS/ 5BMPSG PSGPS/ 5BMPS / AJu4߆lm a[z^c^rrƳSa SmAߜ589jh@lPLgG(E߷=;.3~7K?3RnU)ҧrE;G|NCx#ωZp|umv <B `+k5~3[[v ן#IԒkJ%*%K+}('2l=zv(V; yuKwOZN &DݞL\FZkb2* "DͻcKS t@6J\T?6k"*}&tE6Lp2&d9d@C'B6"ˊ_^CWM޵q$-6Q`읁E.xd)1&4II߯zJmJMJkc򐨞TW5=oNQq 7{V52['c1@ 2x`8[Ile}[,@e_6c5LqLFi8^Igk!hJS&AJ}L͑{<NђYztҖ..r~_NeDF-ʆ$[0ˆ9Kt{sbшbLX[زO_zIH`L9D6\;悏JXTl*`9F?= 3>31ؤ9d_ub=9& ƌGG7đhpGa1܌/W &wc`H3P! FQ1T;1d{,`ZQJMMGr'^ *-,*)Ĺ5`aLӻ`!,[,! . Cׄ-kpi!Χ p: Jj9!(k߲;KFD})&ˁ4f0m}B-kt3tx %* rp#/[U&cY; w:'MɌxr-'c'&]GG+mVo8!h9%* ̩AEHQ'&e%: fz:( 8UȓQͺw ehNm h XEZ N֏:c[A[Ax8vx]H`v4_8|Т:GhX漝~Z '˒G~_]M3X=Y^͛ Uz5X\[k:xn1H2;s|fTE/8Gͧ׈5sM!;՞ZV9;$x혌.V@2ZhLMЂ>}IIz4`IP%؉ bo /M)'wRy hZjO2}zWeݘCuSchU2~"Jx{.kHTڒok<)|h6̅aMBTғͻ5ꧩը}[LPQb4f'+VqLF.~:bz>-'aDKLK\V)W%{m*RjQ( {'2mW%:-FU)4@7x((݄UvT,e⥻5D擛ԑ^H]\e$e[Kg]ږ6!gy}t۩Ъg%f<4$UT8dh+˖@,Hs@UW5x\U7!FQ`<7ͼxe=[oخoR-oNg,GV]2 *ώDmbg$VӨ%Fx3$+X\^OE< pZ&a-ir\:ۆ3'U!scUⶠޢYjo db(=% $ J5O!]:%4"˺a+2vV|z;1jg`ӌnT!+'%d BZ=z}`P3QmT }Ƨs%rg!҅P^ƉN%!PMPJ^Pji5< I 1CNX@RIexBH@Ơ "BĐQ?Q!":yۄ:py/'U궃 2a>% 9s_3y-8MZem8kl|Wك ;!7&*\rF'cmȑQU-{}L;{L1C6݄𱎬WIW?Kyu]6(He(MSj #>x.'º`٤EI'(D{ۗK'Y>N-5?o$з$IhԲ*a¼*0 Uy ">b~KGʺCVqv*zǮLw+%%tI&u.KB,}!us YIti] +ũK6مi,z}zfqysQXNM] ;Ӏ64dP@BIQ{p-: (#[) SGUIRfؿ2NR*Ok:\5凝B!GU|Ka%pg儖b@C5Qr{&*y!}8O$2CY(f>v܂G+mLIΜ@8%Ѻ^TBR"WN'oWn 2@Tk2\~Ui_L[%ˇ5B"#I aXE CŪ$h7tCC׆8qkJkҾ+[ed@?S㠳e/mY,MM$ɘ/7g1 +c< (*OU7_u96錚sG%%/$(Q)3>*vbV _0rUz<yVڜ\bP*mt][زiʃ%D@i f೮cggպYmnG$=CD 9(C*Z!)67ɔuH,jr$)YbJ:d9_n֭9dDtjI2SCH,WFG-rH uj"8'Y&/.sz.) 
nwTK,wyj23cc!2u'4hKݤ%ʣ9*S+u_~V849X_g5 WA,tQx}VEt=bpFJ` rr^os_l9\&tT 5LSq[UQ~;M{CC.^mTD_z̄חE-$2sON*A2x̀ULx땼wdˌC8"-V#cUK\˾nEX( +MFU-/b]+# [fgM`2:Dr$ǀ[!>:!Pʮ)3gR$K`)sc)]LĪU>Bk\;i'JHݶeӲd5Ԝ)Z^-~[?UaՖ]͢[ҶmjE'oo6s8{9כ7o&i\|ގ\bGqȮrߕ+wWEZ8N7")ў0K<R/Vw >vwi\ϖ{ 5kTe8PA]dF k;}_n>xjV,7iZT`3iqsޝ3r8C^, J =xw>Mwiϊ#]aFwťRGFrÙLd%(;UPT$R ;B[zڇyq çNBMF n, Yh(Q3Zn{280օ4OCcm :^ Z;߮}YߛeG;5nr[oVp޺bWS{;'/nOCYGx`s%Ϸ-v{s-~Ylx UҸ : !\)֞)Ǡ}re >@|bL)ڝs+W\|t ].pȸ~A_vn5!^.!|x\h9^^ q(S$|^R{;4D1Lg#fه\KB [JfwL\ ︌S"\9VCNśEJqp D|y6v!ZuC:XقEo"1m2+@pI[Y-)ZÇu3/vb,8|CbH/:_?u>uᧄ._g~8;?;^wNz<B0<•͠#wNzl9$ώ+}Esb"UKAyc3Sv>N"bOg!5<{Ą>أc=6j5̽ _/$?!_NGNbT2(%v=dT >2#t*qz-N#g W)iIKc'mB007ک`K38DѣdUSo'rq>BKpx(N#0tʱ`)[|w@FF/(?Pcbpjt94%AM_+2_{;*qPq{lmLQzEO+ޙnjʆ2<$ɒ]b4A F˕ Q77TI $έ)/buD,%X]KTp^̏s{E~L?s8y4V焱>/Fe5hG_.E^W(Rzέ#IL}j,0;Kf_&k}+BI_$ER2)h2 HCNWU<[g[U`V'Ɣr7*uOwin & xFNE5v+@ R#)}Ud(nn=);=g͙"vaC+%aͱӹIA\x}61*'(U kQVHP_;yy6tlBMf+Q%_>LZǎ-l ,kh% >B+VK %b&i`~lϔƛtįjn0d0QYܻ|K[TM&4;9:^}o7}|=5 uhq b|;6<;llle;t (ªGuU1du)*B ʘك!RspCNEvD udbL6u!ʶƩaXciy*D7gu` JPt-g1"JsS;mg.`ɓϺ]p]@r;RϔLdZv䃑$+YgKFSA B*AE| Γɔu**PSFa:Zc`FPje}%{<6O!ɍQx}*RũPrAWT Z$V3$h}Y7kEfR~Nx>NWmJ|ge65k;{=Mr{=yu=IwZ%g 43~<7-۫w/eQmq–E޵S51.?kNXߏum#nF?Ŏh>,$%b=y2fN3JNO)׼ePZjY\jfVM1oܾ-3ƛqVL_܎3G't_c/|3Z[͸S4a^73ig7Q4h~<,véYwjPqg7zzTf:x\ӵ0W7!sǥLǏW<Y'/R!8v& *x ږr鵲?,gCs #֒s*AE HV&48fZˠ{[2%xSrL gt!OꜴ *qSSgTtC-,׬ݞƞZT<^B8;E1? qTOjZrl=GIx ia4eݤƶ֪,Nҕ{u \lDUc^f%m:)Kɀ)J F5ie U;4+=O {RCGHt0\%Wa\0_U4 |b[SV`3!-hjߓJ}2v (lD W(kaTR.Pl;[D51Ť .h #0 Z1\Y¿xu9x+ 2Jp-1c+>dw&狪2JP= d^Y[<8IrU&r5$blǩ(Y 3lf9O7$jT'I@(y4%W) 3=%&"t1j7f30Ɂ0kߓH-8l_dnwr;9eVb&Pw@gCޅ# E#k˾T$A$k-d ^OomzzwZOoЇh8fpk֑躥LC\ﺎ? D@QTVX/5FQJjVfwh>h3=853I,@*'d5x~.ѓ$&],s9dg aǣfLGJ4npԘU++H` `Gh(A,%|;!z* 62GGPfTFF/}ֱȊa1h@P}ǒ]klӫu8m-TG!7.Dɋk4: - ,(am'A/**q$q?c A<'|m1Lgð3(3,#L a,%{'y-2-`)mE[K/}p"P/ǀ0 QK6grQ: LKR#뷔+҇(&3v+e:^-ݰ\Avz= yU63$t3Du^F@4vh1Uʤ:wB繣=\z{,A&MV2_'HEY 15HhVhcA3Ԫ:%_KM0BpJQ24V-"jB%īQ kڦ³tTߒ̕2hf@fz)fr}yQ^eߋːD6z?𨑢EW- ) [).5'#ǰb%+P#4jU)fL2Jz!Q9)1jHIe]KHݗ[?YUfsŁh5RyjjTM K^zQrMmhsW]]KnCYY{C?uT 0ZJaC"Eb'U5R~Jw<f<CO2<@l."mkuPI)|BGVi.R,c4ނܜ]<sÒm‹ŵ付gY=&LEӨ8KYU hr*[!+_*h-QFWM%eaIcTFb,EJ1(,$nrk(F Y,L5ia¸=v,t"M^QM6aXNQ{ȠnH%WX~u[+bc q#)j FNp@wh j#/.IJAbtԤrPQ",zS{̓]WUooq Pr.݃gCeUN CV=Uй 5[Ա˺|u#F֍r% )C5`FjL2:@xWdLRRVG6)8IKq>j*\!(d4"`K$[)U!D j`W?{WHJ03m/ŷ .,p, `@2<3 Slɲlneo,K-v7ab=]kh]vKbfL0>dQ%.pu?&ٚBݱ2<:GZid=Ɋ|Imݛ:_?N]?- 708S0X|N%Ne6(L$,1c6"0JrUHYzkWA)XIt :B̋!ł}/v9;) ސơ}I)ɺ}o$ JU1E,I0 UͶ2*݈` LeE1eFIPhYdII0<3zbLem^+:[Rq =1v xAJFCQaN._TW3ºw+3O#IQ$CR@D2@.ZǗ(i<rae]v:=o[؛ @-PM۾~p: ,jt_ىR~xEp n0xVؽ t ^0tҪMS ֫)-T"(Af|`y:OwFlm0,ʜU I@At50  Lh 4ZV=,j^DbȢt12l LR%0`/G*F}P2 隔!$L(. hLcíڋ+јZ}w}o&ڀ 5;>[Aҧb/ZA?fwIHG:4I@y}(%$S\ S AVO$%Ke-h1+S0/^x(cvyطpyq*ŏl4wk{^#o9v>:¥^/֛pΧgڲ]oY[9^֋SmJb6qtcqv:]0>Z~烔^Jz{֯x>×73ʉՍM陸.b»Ezݖu-֢]vU8:"kֽc$&n#:k&DЌ52S ՃV5޼n>k~Y65 Z}#<\}k~501 "!ZǴpT5cgy=ѭȼn2A%aA؃88.q>tr~0?INB/]qVkL]ҁr:jR]n>5K;CVOS.?fG#:|waM#tNCt_~cɒG:n]4*kh\Oi~{e"=oNGEM&ѯyo TОU4_=",{s|CEE˕A ˶F r{)]twK÷H_!m'4T̛:C_|]Ks~}}]%kYy—!/fkiL—L6.%.:#7JEE&Θ 2Т>yAg]^  2v=rr.=) @ I49O^vv|lhE?n$qNg{&^2ewU(:]dX`~cҮ:"|[ibUcׂuj2"AB3:{<_y7%[pLTÉL6\G(Y DTl ~0K?~e>8Ŷif`O,r!OFO!wH<\*[6N[WD22Y:݂ѲH,xcNϗqq'bco|6BDBd%DiS & #K}hTx Xxӑۦ<qV籆ڮAdmy~JRl2zFۨ)lx) ^ %f+>W<=cn9&Iouֆi(ZLM^(S @pJpnη^:Qc#;h? #Q/ߖ{"T Lkm\`!cAZ?DHf|54tP)\08B{v9=hfz{N(},~,DO48?nZ#Ow)ɯiy=޴UJ?ϛx2xͼn|H[*9?5yOJ^Zk՛qͫWU{av,KˍJ[/[{!6|\+[^6</XfRLC3@Vg|6ᫍUAdTx<G3~׏1SЫo>n0ɟh bHMCͲg ] S"iUM)IU|_[(2ה9WD;j&Ib`6Jz.*D<+psW`f9ێ38kZ s)rmPcEmnd|Z ۵;jw*ķjwvݢnRV͞[aZ%D(XDa[RCxѿMJw6|(: Q`em}t*  oە/O`牱rTXg : !Z`e؆!& ~U]_lh;ŠD$!3׼{Cp_wŸv@%K<q W+R(-? 
y<;2Ynw9%8|o\8 SJm ++ORD_3zrrz1`Li3pz܇zKxmδ%f fCҋyKϸbH=nyd省z< 0BU$ӏ<۞sgL,Z ^sxK V(ebdJ|P*A%U[癁[<0FzXӸv!}(4P6DZ`DJgDL+RI\k(ً$̇*z&7nfHg/%+jH*+&%$i"<!5O"E U2{]?BfR0C.,m%սSv|=)Kyqd"΀QswXTtQ)g,HK]N9lqҋ䒈Ǻ˩.gQ:SDP+T1 Y!J֢fhc &,,( 2\<鬘C/R"ΖP)vO1=BwAHHʥ$ ĤfH&(%p fdO`w #<hw]z`Aɖ*uUI},:%eC:H>F1)޹5@Q;21\xӲVUVLM9~D{I,b 9pJ<'.{2}cY|8\7o G(O;1hbQdRUQ37sK7Ì9S4atWBTm+'[jcde @Wɑj,Hed5A caF4Łfx͍rtU>;>"bJESBgo|لPуJ:7(m~5} 0nc%u&&1AU2bI\xI5H V2Z+H=H 5&:%Ok(d4ϋM6#xNU+k*ĐQ44G~jlP#A{"Sej; eSYr2 B"D%nE?F5vRx!ޤx\ yC9xm 3UgRJ@Y: Vp)0g d0E.[Ĉ 4eL 0 NfZ̙1) #Xh[}4vdFqVy1B-E˝K>enkoOK֘S*+Qҽ?w|޽ k~|`*zQ)yE.6ilתM+`ɻŦSWґ+aeLw $^dV4'ezf̺ǜβ-yGWWhΔǜ?׷<3/UOg'}Et;WomӆtƜB4]8x芴,WܴV.cʺ>ȺC槝Վw:sjLXZםvr>ZZuLsw?YWc^_nSg6観t~cYE͏ cAbLͫ]{ߞ7`qx^YR>j>C4@j%tPQVxdY= G݃vG9X |xF|3۫xżT}- xypqItE>NJb2(QK2rDmfƦd$8%H2v"Z3.R)v{_uc-= uGQM:UqJ";+RBNM1TYReD`:Ƚ;FW,euNRzIX2odQ%)L1ר* [;T:xrb׌PFdŚRب!l1YԵJC#isG$u#H!fTXI%(4,I_hPE`"?p%IҔT o4K*bsUY L`[iM!jeA@C" *EV Yt~w ]:@%)U RNF0kp,!$ ?p<ż2? "vq^ARHZ@Tie#~2l-ITM[$K6ƓMkEϥ FlOuaV~ cٝݔ~^yXl! Kc o u:SUiA³]빣]:m̴2 ǾF>E%UB))AgkHkZcOnWS17ZGM B%[V/m;J[lIc`>'WAnD"+H[Ux%44xgggkg?^9\_xŤ6%VE ]_MJ34g@s-!+dފbKa:ZZc%dn:qCK^x ~@{8AEKJ>T#t]rݍ.[n 9 A,Kg例xDlPy36,8!5^т êrHA8aFn.gK'D,^Phh\m.FJI>b]fP ۞. U+tri޽[tW!3 w6j&|@JAeF"k{k7K 3[K3Pp%ɔC P_PphMt&@V؄H` me/xHNI3 1hxۙ-Hwm~Ց*>:G~{!oU҉{8n#*nOrPh:CoQQ;={?!%mLQ-<-aQ1JᆲgJR('e5Ū$,OdII*'Rɔu6ojIh@Omd?YBۡ_%c>۷ |\<=tY%UX0`=PMeTUZQ)1E)RSYl`װ 4.לrj)!brN gA6"rƋ!y/ qSޔ"raʪ1=TE0* K+P c5-= ҏ^>͠KTerN"(UYPr~pSTU|B8;YƘ~NEA8iKDL̐T]R>;Zmɕ߄>c왡k}2a<>dYHޡK`(6u^a _{ĢWo,Ts>X.q}NclH~ko:3 tJt`DshbDh{؅lbyǢY͍"3ߧrA# 륍]߳lOI_pq;S8cr2 (%E]/}h؝ˆM2 [A}x^ߨ/1?X1_mG?1 YhBBCd E}ZYGc eUپQkQ$H]:a$*RkNHiVRjQ.ښ>к%OGwH]qi*G78mJ(>eD2 l24}iSp./{zk7}LXoɒO޵#eg0aEV1@^vz1X`Sc ^w'v"ݝ^ߢdK"ٲϱe{ObY9$]}IatX9ZwZkJ'9r^pUSN15T) xA"(h[ MQ.p\ 4ઠU`޷oc}P4hgro=()(6]YgPZBbEVroبM*C 'iC|֝7mp!) B' ޓz ÷w0OH- S!Ɔ'[!mW2b7*bDj>7y5ӃӄDZ}.[ֱr%\I|\5 a+pت\zN,bpFԊ y%N{;_(֚G;S2*$F4(3iki[a{ʦeT(rw34ArGp/D9J %#̫lyO9 F4()o+ށV kk GӲ {4xҠSE3Nc у5t(Nϧ_.}eĻkѽxpU񐾲hDE+|S}\iJS)&d+s.p]Qq'Wj,I+M9J9咈BzU6Ȗ+$s5¹*:oCq;W\>og~y:slH"= :P;uy0VMlbuPT]D,&x"WhlUC8[A0NQDl0U;ӜxUN*j1Q9uE1 NCH[W (Z @RQ\$fGo;9z|?₟Ewcc2V@PhRT\15;pQ#du#nX$RD!&>rԎBWgbg_Q3FGm iu\s 16&6#-W<֛nFQSn=?pc`On_}v-i@χ~|x;a=,z܆?{wHZm4ߎnhf}R X5f1.kl 9Q.q1۝AA X;zóK4`ހ7`3/u>7מ?^$89|٪&rWgsg ^4&JsQfߨ5?8U+O G%${h*DK=]^Q*ĴG6onEHonr\=p4Wᷓ<_EY_ooʎcyw.0 !1]J1CښD׭'stEF>A4D{i⾇![MX} z=ooggP/:'g}9o!X?޿>A=x'wH2 Xԃ740ZTJ;sES'j9$,1o'yZ{%:%7WeE b?/}rKޟӣ63vZ-Ugy G@I4U(aA\#O44?sYw#~ݏw=}iyZ+DЧϘD*%kAQ,R+g 1h[kPapF3B_,m8F&4vk)R`Q4N+X15x#rhNcs >7-W!}w7*xzh&\肠"zRrMqhu'RZ)ܨBL\^0|AQ:s"9njC|}貓6@`WRF{ ui}p@{:LBؚz<^#z2x1D&j[05BPQ-L;K4TbiG8(Ɯ%; E5,h;tW j:Y868xx42G ڟ+եĦC#$%Ē q+{* (2i݄(ȣr5`~1e# DTmId$P#hdeopqn$Ny*1uܗ3qy8CDԢ!am@ tĩ)rA pF"Q9C`6!m>)IL1jvBC#Eه2qn9:IE oF02ox#gYZfj\AD$1(Hѓr$3r1sZDbQ e,'!@~ rBbnEwښ@^..ҍXs;o')YQɽ83k8`mk\K :a}w9iA ->VACC gE ?2g,qX){D%%xk& G:}ЗbO>uyd`1XN- SǞD7l61st6` Լ3!kq6jROfZ0YAH|'oZ1حND):aQҖuhL窑hs;襲ГwPy"J'yi oFWWЗo7~_k}V\ ᅠAț{:6P6SB)LJTaUA) =B: +THu,ƐBnĮ7.QZJJ_yW[Θ1l>6/+*=ig xCE#z842soY:TvGҩy7~]'~=T+lTҤr TKWv9kj̈́jxY3y4PqhJ V-Q>4]cXǖSp3Zz Ŧ)r!l9k=ΞUx݅G[ZPgW8Z7sjx9hN1i'D-FFʍky%.k)wRn^s |M0ۢ `]x_ Ϡ<"U]QܲQQmQd́k&,2];F\aO=ɟ[p1O1xZ!=TՄ\\3oB'{˟:c;#\C0yȣ:xO,VOUz9M}+5'_^T 9߻W\{4A"KޫŝSuV;gcT:BQhlU5WpKAX/aKzÌL.V:P{+7-x#V0ZQ _s)%Jhe60o4Gfi46ކ((:1t)/l!|AʨKz E,5ZpGf"EĖLjJ Yk-_8TW}@T]Livpʁps׆.S6m [x'<Da*JQ 0Q\證Dy1e56a-l lFJ>{mr=-sl1›@户 # 8mGK"O̩5_~>4J-X$DTDgG8\h/ W <xy !igEgwC.0%#khHZכdO&sa R;Hm J~j$7,ߔɾ/N``E :%^|@vJr|(lP1 #Trଈ (iy!<^aIȱ^C0:L3:QtŚyܡpw>"c5&0eQRZ@CYKXl`G.qn/HϬ%w?(|쁴l@c痒~-x7+!O2F%Al 1(*h塂aRيuAO=/l Y-הjh"Ĝ0VZK!FA#_0k&{e/y#Gy_%6à>}$A!~\vH8/_2E7?έYE.b`gݗ`pΜƞ8#ɹ-dKXHc[Y_*Z @F S9U9$P'Je.'^pJג)[ܕϸGY^yH r Ar qGhlG|o}aQeRB% 
2`ZNnqq#GNjXxTdmm&j i1$-P.&(ؑ5n<朗/.Z-| ;ZkoKڼp]\3C/ f>J&F)U8[CTIW_^:%~vftyu3k :OFtU;tkq;W(B6RnaF}3o)+k/UtiS`!OTsjv20lwGNhha3a$ݷQ7) Φ= F KT&"5Ed+&.ao7T'SO**V:uE(ӯ>55NfEVaWj[3\E tlqRz1Lv=5wfAaHa?`pV.4j袘|U4"9I%[J¤ty,Nku FmC` EȞb%F]l"LH d[{oW}]N8qߕ;3Hfa Uzt޺%5=7xʭQbm,0X]LHWCjy*6Gܔ]_\}_q?篿c1,N/Fl..k_$ֿG_#{Y۷{R̾LRs&uyuj> bi`.mY 󳯋eivg'o%Z?[|+g|B/E%faZ 1R g+z2OO,&g+=?v^*=\WaRnam5ZYMw1s"Jy[X]< aGw9iڡjDT6USP~l2<ՈZb*)lP;kk=!(895}~5Dva@j{@m&H@H<eJ~Y4Ot\cƚi *ӡud% 5S!-ng)YF^WgT:*!hA-ߜ4V97{+B?0~xgAHBC%|[3_&go{Э>CNh-PTL* Dn!mIn\Ycr86T6@ByV(IVQr6ju2ȝdޕ`l~8$vyCޟ4ajS}~Z+_4lr4 /?v| 6*K(WehR>.'e7Z?ˀm[; YCj"w4`})O\.̨y.%:;OKp_ Ӻ/?oWK^_.ߞOnfn]u_3dvr;5Zi4q[J*4^Y |O.MQY}q2 l>CFVcP L1UV8"Av;É%d~uѼ_y;:ci>djوsFAB *f%w:[VE qՂj4hS8\igq=B]|g{/?h%SDS5sq`EZW؂7*-)'[$ϐ/ڒ]M,y Մ2u; % p9dKKϥ[uxfn?Jܟ[P i >97}R!,MD9ؼy{5" Q4N=XDlc5ߞҷKAaKξVa8sbyıus?9,ichQ lq`[HU˷ݺ{(㏕tZp5 8{l1L|5lm 2.2CzF24A=r IIB47ϭa95Λu4k,!v K6 %!N&^Vr "E ZSːoDl;);^4_؜Q&gr6 <[J!B2tʭzmG_oYFy?<:ʜw7 (tH|ujz @%[ -oH,QP)W ٜ/Jr  Ʒ&KeQXTI@[FKr2xò箢vLwƫyq@!_C]B-n&TA`8U[+*z# |[+%a 5K8<< [ۡ@5eN:00RcUVS#ĤH Kv pslif0Ȣ'g{7[mh<ƜkUPc *QPZ,MOA:rרkv9Y# ~aeRzZ/T3M-{V:ـ1#l U0R!82ή;tԉzI=T[Qp@%j_6~2 AN:a"bD,@O'9Idx9e#kҹSr I)+v&^+!j&=yp20xwfA*cmkw-غK!#DUZ"%*nJ1B+aB"jE dՠܰ~S'KaV{4F$NzuL%pLNF˸K1uRfR))jth6e-H*Zc(5(R`!R4T_a|YREd0քRtҖ7\4jsڅzފ} [ L;Ptra94ht'e_b1QgxNCR`U$L+ՂrW;Uox׏z1R[{K5Tb}e.F2e:Du9;cwMG霁8DuC.{ء-~#+Gl 0d*\dF3[cu#xu#xu#xu\<j #@ŻwH4䒶X䂽,lb b֍,)<p-Y5j[{ƲR&V=U*"@&D$ щeSS?Dp6eZEil4>($G=5`9GRQhޱYwv7o=2w 3m7l-WOls-R%[BPTW%$AM*ZLXp}eGR%{ˍMs7odkDA@ mӯ^Cv)zj"A@DqdR WSn 2X0qgLrB)kUPIdkc}VęcY*OUVjz츧{C f-$M#gGu@[BH$`U6VmS I x88Y@GC?4o:FƔ4厱Yw0zv9•Wd2(8J) L ;r0ϕR1CqNYA JMv4VlO8J`4=a=[IKrG2P R[ R(`ޒ`DQ*z HhYH#0%IG2rrKw_^Ͳ"7ΠbN L40(e% *[*:߉}qLGG=7}DKR+ Kt|2 0}*eDי[AOg΃c/0s)f9J (8:.QH<zux<:>sna?ZiZb ̊a0!M5agQ/-},]7;ǥeqK-A͏ӷq;T)`7w TKYu8?,*0N:Ϲܮ^4s郉nl`ɸqGWW0վidv=T|0L 0,2Op^/J3N\&/=1rQIꈆy^YymJ88 RV~11 Gwݣm:Zl{ V(+u QTbK9d=}f3<˃ٜ}/dP2')Us.5RBDiCV dGmK[]E4@ <^!Ѡf!?,fӫ+8:mDtE5 Gu&,I){eKʧQ^OBfHѵ5RT 0UQɧ9@p@_.Iem>p,ܲWG"~ͫ2+;:nvs/ <" S?AZsU[5|Yoo|&_ߔ}R9xn){.3~kP+&ѥ V3A`:(EQh`1Hٛm0·מY\7zy4[>c!uZזjLTy]=N -R"qkITy\/6?GKr [QGo|&% ɈxëB ZyUP.\X+/(j !g!/l[ϗt\hDcFd4h AIeBZy˜GȤ* =7y$7v/xДvԓ'"'NJ>C{O4xv37"Mboke Mufb_P) f?y#'V:r,WRfG D;%$w8zƑQ*" =x$ٓ#i蒮85}8nEiZ yrÖ;K}GV:`5IQ%cw3pR#\>OIv9P6@ܳrԴ`ѳ("#a*mM_Rm0"&hybe@9dEt୴몈uTů%YufUI &X_UCDRգU$ͪ0OM5+-g_}i>*qRz֯덶^zғׯWGUvT 'e}Ƿ8FOȉAl6>kaí /\^{ph;}JF֭_]Vo$:Uf>R1n\xlZdPd]-qc,>>NpVj^ lV3Wxqдr3*6s>ކ1AҺAxJTx4'Sz|>_|bq/_afw}sQY#hR9 H+g3,?8F[3ȍ(qƼjb^'˱H؍)ґPcUhd`h`0xңj,Zb<ͤKJ&ń]Ԥ'c;Ȭ|0=䘁.H ynI km+ C'ֳɣAvqKϓβE5Z~gvV3 KӝUp.a+c<<[L4EG::;"9MutV׊vPb cμ~{x KG>ųHi!ACV-HaKsKDL;ZpK">&^ m7̷霞z<pu.KMtjPe-S~8 0c& ]m֫ky庮5qEQ4":T=m)V ӏ+C@Ti^-q8]>.>:Gb=4M0>v °M41=0Keok'zY?^WO"r> ,wo_Y1Ӹ39F3c_Cٰxh}١nWE(}'9yV钏J/r PT'n%͇5ADי  xLN& ?Ki})yd? `l]1:xI'S_N/N:$ Gҍ$#dC$-:!NCIZ`U3/ρӑ;ͅ2kQrcT٣Nr-(]J3'ʢ]bOpKpZ`wDs/s Q(vkĪ'ODI!FPi hHߠ "-{gAZAWֳ$BTNDDZ~ %F"9a:*B"OVDc`$x6"9q&3А;$6𤥵ސ% .M_FfD`* -=BAdERsL9Gd Q*j/Duh4)A h<}C;-J1!2p<֕:P 4# %!1%FyifiVQ^1 TׯH S>eZG== >y/2#6ϣr{uytz6܅OVs2N/65|uU/ړӓՏgSr-vK{f'eO"%fE7hF˼ukd>JwGG>Ap63a%!j|zՁrX\muOmbB6}3{3Ks{kӽw'+ݾNKF6=]z_֯jrM|2,;yV.y*Ꮆ&8ղT?-_S,_{Z+N_=TbHR{7BĐuwʬnB -YX?F{ٹa7\br=K1 iY $V"r>XE4)BLqD^J-pP=76=#t{oyj.lL/gzݞ^ l?cU'%[aM[=Enӗ_-Ϊx#ク;=>SǫG׋UVv:[ѯWGoGKۅ&R\5NjX)|:Ww/~}{u%soG畿O6"'E !]=ib 2$03tW[J4?S`}|lug%Y#Jζ/6#I{Xg2g[HFԂhOS6p:= ϯy/W}Xm*^c4ڀAkH78 SX7U]aBvxYb;訪!hv~D[Gb2bn~̕X{Z %C R&"۬Ђb& ?4ARIP3.fnyLM,©Mpƌpmk6d"GP!pš#Uh'eӱ0P^$m>=oX<} .6p NHnBr@u(D\FZuM@mu,fd#hEW&f5(ko%fP1P&Z͛@)>T]]QV)R4.h…\TwZc7nm挲^ց2!Hr<*DZ 0ͯ|U[wggxai _&w~ZlL pi o~3ӐlPp.nİ}opdIV~k K6YkI ['=^:KpkU,e,F5l5H"π> P9B1(ESZ6-sr}S@E_3TfI%0kFcrO# QG߄ j m'%&TB H?Y 1TѪ& Ҳ3{tƃ!lc+bā5 #Bca. 
8xYO^T:(NVCmVݓLOmJ6ec8Xn t$ ow!F/=ny`r9"\JRrzsh Q0 kaǝ=!X=` "aO넡kVItZ;YUbU|6chpm5f?W*!Lgit@a&f02-'Ħ_VٯsQpb3!6%h58ʧhTF[6QZqg0 m)AgP3X c൬n⊉:Y ܈2m{LYXnfpoP)̃9G $.SKG[i7?KDcjR*?v{m0 1&I^z(,ɬ4)X={^]^Mt2o fcڭ&`kP`/6B#@YIuqo ZT=ϦLLy2治:+&DW9Mdx#dx@"t|<zWxYb/ώn83Ȧ~kB{uOO-LWߦ!s Mw?_Wl,Xm lEa, ~%L}9^}g_W9ݟ_?;'A^/Ww|sįΏ.43ru\qݛp~c>;>q~/C|K vh[;}G :І'Sy!wX>E9ߌcg"~9HZJe@y}p=y_p lwXwÉ<L*ntIhsR<ιC NVU;8>_?Vwt4kc+>h 6X־`uG,1{\{Jms:Ք/Z{=ubTQ'ϣcڽ;z*>!J qhNBBJkb(SFK.ڣG "0iБM DnJQ!8}ǖip_%HPyhB}`feɁ+`F0Pȓnpd:1@ 򙿼m D i|_:3BԦ1 ^Hz"ԶQEΧ\6 O ~޼2xqOhR:8m E?Wa(Qi0ڐ2O605._Eq}^<]`\=Khӹ4`Ç^ʖY/ZEԵH}Hשm 5{c-[ 4+W@{?ˏR"`oݤ vZ/׶1(<·}H=%`Ҵuk#=*!i@t;.T@thNu %B!f,FoM6R,X<[.VD.wXR=P-&s@F:~mK{$Ȫ !0mu#&s0K`"JPIgA*dcM֖l̮;d}d ;nM{Q&U2  *͙7:؞61٣ nBzPemS_CVi=>ͺOj.k6Z7 # I.2ɮMnDY`9:X(NКI9Ji4O/yS')RZM+[Duӆ”M4rVnw;{.o7[^z{~o.~hw/.ƖaeY>B t?/wB֝[IҘM602 jKt,ZXT8RcuJvϳ7;qvtu>Q~8?V: Yw{]=kEo]Xp^o#uoC<JgM_=&]ȁ{mt]ܶǖHEYFwN^r>A6ey?uBXZwݼ EAWr~ ruLYgAINA d"l o~-%w`bwX]{L|{wAQiuZÿQGEr|X1y32ULAx?k^oSqo_'1' ~ osdqۿU!wl2l[9fҼC3hDJD6gX7-]q'kwGb3/v3Q<(q\bҌo-T["~s ?hQi5?|N!sCکQ[C|u޲$BJ >,?*BV)r**Ė.-21e9&ܜT& t94\2 >AUʹp "& cJFQglrP)YK*` Q 2A8)xqu ҵ:PvL,@25$DΑOH HS,J{dN$a\X TSX%8; zaIHzдHRՔSX%3X`rHp[ t#`TAlkhpxT.ܜ-*XJ&&"j#Jh1;mTm&7Q5eTX),͊rI@4YVYvxcaʀȅQ 5hl +8Qa-;D5( pc`KF,uo HMی k`Y<($!|Љ S> tJg Avˬ4KG}-.BkxxgB5@MQA]VL2K6(MiEu\Xf­-i0*wLwPAyFM@Ԁqd 4*,km1m(RʴXʉ;偠,!0 ^]Dpsq'7yy6ԟ6Ez`¯WjVjR(U%2$5k#UP-;w J!DQ-HMN@+@z+c 1@-pV&esAsWCXY3H2уRjk6/((vXwq?W8g<_1vWyOHkbg c@ Ȅs b$Dn> gtF"0Z$ XlP "uy/\17Шpe9"? ,,4[W  lmZfI&֥:*ʄp0HnL 9-BxVp kE`T;ph4hTըFn }  &;/ȤĽb)Q&A %+\ҥSVubl}a41@h 9)|!O5А; PPk(ɝ{~@X̣'<-d#@O5݀6.ڔP>5|ulDi  瀇U@Rb-x& KzTPbG-kAD5kQx,(=u:2Y$ ^e #͂(GsZRZ|Ep( k#UXjʱdPFh%t@GF`f\X1(5ި.İzL$9siM7a"4 %֨qa kil蠙>Z` dQ jrgP(BM K0Fw ?:HLU$o(6M>k`A!qa SsBcUwmI+B>.r_qa `oE_m2!8N0}>(& [TIJD}Tuէ*rRfY\vn陆v@ pwY@)[a1d6A\tL^1볶 )"XĢ[mi#T 0$5HUL[3| R2;'> *2xueTY~[`Jw{P!Ai2b JkxU׀&|-5cǖW(a*֯>J"R) b`ۣ|mnHxa40+uJ l;eFX E2XGmWVOPmFՖueRX"mpll3@^ѼuRϮIdz(,H)d1ZCg_fӏ?x J@r4 Xv>V!/ Ɠ?5qXC{r w6aD1mF ݆k03m'd5o.3޻OOp¿_Ͷ}UxPK``6{%Q+?ȉƆ?(VB\K"rB !'rB !'rB !'rB !'rB !'rB !'rB !'rB !'rB !'rB !'rB !'rB !Bv@9̙; WCAhx!(焐rc=7A;0dc"+xZp,rVq= ?L/K{=^솚Kӓ)"(,rRAFs;ڴsQ3$嶆T(K Uafw{;_ ly8+Rk"Vѵ/`bh| =fiE) $Ǵ] ̋ӧ2 %cx]mI[ f};.VH{^+#y#76 M@# 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y 'P@y[/;$Pz+ 1J3+jSQ~;'Pz29}XGfaGRn0RXCGo.' DkKk@DooF}y²>PuQ~Ÿ2_J_>on qf)63]]'x A8YOKhhv~棛tOooS2pѧfL/ۯgO&sm5rsC1cs8IPgKg/D)s>OswܳW9?=4NϾT혯iO~JM2~'i0mR9&jtϮr|tu9\+L8he=|tvy1X%3?lRY o79uJsMOU9..~⋹o|<?z;}c绋-n]'o'kemtvyXy2w֊3. ~7Ҕ0ﯻoM͏ϓ 6ƾV2|zS'r=5qmZL[;~?:D5XKTcj,Q%D5XKTcj,Q%D5XKTcj,Q%D5XKTcj,Q%D5XKTcj,Q%D5XKTcj,Q H|ߢr a3|\wCfsAHX!нJHü"ЮS8"2 >ÀU N2nb).Ž~ ) OC~K0,k{M|O6*( bU•qUWyef=P!E K uXi-(kͶ\2/`L| 9L-߶>ݢY퐞#lm)b=%}I>aq|8SY[W/Ttxހ$:ZDʦ 1*7]k;u,uk_wh]!n“Txj v0++uY"2t~tBi]7 oo|}s"W4iHS Uu|lh(9/ ܶ*(*ɯT&#{]Id-$"S֭kO ߸QKޓn]Ew={jKV3Ʌ`fUz11G?()IRiH0|/#;\dwG1;{YÄ:aby\`#'壄_'ÍBXi69e~ze7T C+04yA˚2"#qQY  $ݖ+{\vĪn2'-tI*d o1+:٪cqkURNg-{=CxwyREy,uQ*֛VJ݅czr-óBĐ)#KH}Otv"DgmL(ތjy͏uoW&'-;Wr]AMtS ފ )<(DRHf R[ocgj՞wǕ~Sдߴ~Ӫgf_]awF{i{Cd˾`=)gCU|4BSH+V"UlUƔrFn1U6^鿌gs#zpԞupۋ\Qsnz1$Qq-o1,^Ug6.YΡ2H:BS*v^)+k ;+{$8"̱LB1Bhm*jlňr-{^!)/3Yc= XD%S 죵5-Ar[P.)ҍ]QeuFf &+Έt4BdQlI[hW^!;^tݞ1_tzKs㱾;{oUR#X"BA)Tb0N9_B$UmsrȨaYN@X‰pnVY'Y尺(J?K6Yɣޠco1+ÞqgVɜzj06aP1oBwB\6 % b"UY3g&I>K:ZӐ>eN&P"/ r(Z\s;`y2W#m BxecYV5\bTcR~Ecow=jC!N+S%<2Xj\,BTR Ϋjw$GԽ3I}h^`۰03B#OnԐ:Z67x6)Rb)jHVEVFxa$B. 
KҴkzfﭥzXf͛YTKDX2j9?nO^QCތ98%7#Q2*ђgY^z}󲉟O|BYP|jckncm=1&j"sDZIN9A8& !m)2"_|e7@ciX<Еm1**m2a !0yS&q#H [58pׯeWcxV/w%3yZlx{jaku>2dT_Z`eՃBu:dn%|>nWy0OIs[yVt(۬y8p5Ͽ򱾢V}9W]ş㓢/%FWt@ q-﷾M)aw݁@xh0ϻދvpgQF셃ŋ9αݟTT~}__;)JA$FKTr.j0d*d\?_8Sll 0=R(s4񺧚8=" pO)ҘXb(#1m:xIF4TbC\KLX¸5$-3S௧syi;EfkFňw1Ƞ#aa0Q[+5f^4á4 Αt-m}|Ę|j>o|`/ϖVf4U-]'Q1†qFCi`2[Fu*1L 0y)Y w* r҂&./X䦝7'e2.7P3 7"F1Ki'F*V$/',86 LËHM^rZq4Ž,wt)9hW21 A0又(pb!n@0b0,m2qU' UW`:^zdX rS{uJhMMK< mDD}R%{M{dߔd jIaH|hgrO?AD.(F hBq!=H%q #  (dD0Q%𑤖)8I=9AA`f^(TJDHEBsmLN)xA!ڶޓ"+@{{cfDp˝<6(|\ D};X!h`2j@"ˢE``i)ZYUoOǿ :|bX81'9FT')Ռ<Z5KZ/TR6"dX D.BF1U.4XꭱّW~D^;`c&*?:;(~w׿=ůUNdR?zoh|S3f߿?" 俇42bAY_O󓜞苍*UyN /Zt{/&۱/kQp-`@a%xnK_OheV|+)9-55[⌛Do?Rg[WԢ_oe.N ^͸a(ʎ+%x##|Rr xÒcތiL,t@!C(~ag_TNYzbFzO3/!EU-s|&.Rg@f'0L$!cٹƝhud3xՉߦ'5V9)ИzBs&?x?B?'FZj9yx_8s<O$9屷IϣCG'ǘH,R.MbNNUKa a*jZʥe4yh)+S6L!;  aX>k@ ڳAy)-"`K}b|,|*1^FkT%6Pmfƒ!F4:n?KHkQ] `A`((;i7) wWm_94(0T)1y"EM=gq2Uc̀/m nn&0m++ZY1@O=\-8 }yizF6"0à10v[ٹݎo/M~=An>2_>?o;?=`?zlF3K:xwlC׀KzEǟ"n,_'?prvM'ߟ\4B /?2xqA8*!LF,~!WX8&Jyv+TM1 h997"1De q((-hdu~y#bgᄈ_Yc vQD46I,36@D1Lk0ys&I vYND覄+vq n^A4f ,~0% V2d1y\p)dRDc"@=<" Eԋ *{꫼O^T* @hTPI鵱2Tm%M ">5 ,A2GU2"r(pP4袄SxǐBiX=v` a " 1m \;9$*&n%ї@uM x yA\f4kChAsG8:W\{₉SuA`PBd"INsb ewf F+viO.G(ʙh".-Z5NۨARSs`z{KO LeO9(K%m`yJ;AI+ZH ]8Bit,=' E#D%8lEh'BA]3;Oj 1E1aԩ^]Ðt>LswBsx5Y٩ S?q Kt}D]wh2؏"mH\bFXb47e*M%}N=^$<[$$xL?yMlY$톿X.֬d l+^r&fqL9wT稊:p?C!%-l"GWew:~t'X^9xٝ;.h*A^`Zxҍ_U:QvM Bh97[~l2KfZc$1xx9 3kb3 unct9Zr:|3T~h-6(۫ Kmc#VɭvO3lv;Yy'^Tl/cZ"ۑ_饽ERfK{fqYHT7i` p;UiygEQu/S/n~P]L驗 2L:<=!=݅ZHp(q#Jl_a+_Q ruX_؆.z"PIYNB4KOednf]I_Je5؃@"bV9&T$s "ak~3Em>&} >s.l&Rbw\;jvZqάFKrW&dV\VapΐzsOWEΤ^ܥkÌ, IuϟV/6U( Ɲ<=.So(,(cC(#nx;W^_ڝRj`k6RLnŚaLvN21`Aрel86^RXbX0XrCqӡP@!+plס3xY.-" C.eui:\Qͮ?2+\o#,4S}>qbk7{ ©9Kq$ Vy3J77oku"Ɲ3OqcŒ>;iDUo[GH{T.ElvŦrZm+ pbH뺄dlA k*8D~*Or5EHxKB|vV7l8mESfFef&6 j ͬVHݔ)*䖆R$tS>86dJl@H5Z+2@X5I2:֬530(~%{w>`PuGp+fr>Ƅ uc g5?\ nϺ_-Yeq&ژy{Rd&a>jiCv @56w'"um-Jil.B!!#F)u`Zݠhp~7ƳW(@9 :/UMvs:΁%| DŚYn] &Ao!fi |Ŋjm B%9A FVDtKixs{ZU'iԪJpockK `e8t-Iӆ@IZKoIGs%-tշ!T)X Xs TbژSO#i1IL%!(I\Ņ:SN桱2dC͵`K(!"dotLZ%pV{eBѧ_A%UR8TR"-dzX VcSՅQ.erFHyɞ(55an>+}\^5sɳ]_"j-;bs7DZ¤7o7#Hqv@;7ý/&FV:%rB%rCv&s&Cy΃rĢ8k \r/c]Ԓu)^z{1L=w\TF6ws$Nѣ`,z Kl'P>[ӀSzUJaERC!-PMKlAmgkYx M.\H?9~rO?u󝇲:K";l !쳛ɞ{)=p4u[L8`%llΤh& .W)YK-^:j5p5\A.K;J:cO233yEA[ؠ7q!r-1'h$Poֺշ2ipX w{3L97 HJ]q npN\- 8chrH* I{t5$A;"X\&l1Ҳ SE~]WGaP4:p kդH 1l6 껭#&nq6˴X7~,jyh񖢍iR$ zmxTSɖ\ #`xuFy+U[7zw&5o5CO[uu/4nxnjOq["]<0T-ƶl/|h^ۚoMΑ,!2 E)"KZŦHB yZK,1PչT?Eˠ$"@E)RUKM *gG76FuB s]71~l4i$oCB{2OySuX2E΀Ut4]ҮV58@%=x$YkB0 }0{`:^-1\VV"ǔ7ǒb̈́ڠFZfg͗ ECPr{=ʦ5Y B>|*닐Dh T-χ}Df##`Q ՐY:>\#:@I'.ɐ-մF"[ҴRj# }5o:yGo=~1:6R.{qh;.|գȟi6 iACdd{.(JĘctNd}:^F2H@KTJ,(tΨ de]֮XkC ^V$siI'-ى7KXcMGqޛ5GY/F-rMΛZ 9;:\́5g80fݽreL+f8Y/דlإI/7e[ׯw |ub&WUbH4C[?Nag"Y0c+2KK4ۗ5NUwߦ~{2GuX>/vp?S߮Ze!/"PKdbIAF1I~`CBo%e|x.z^|D<vu NV;;:.̟ l]LZx@'Gv`Y{@-MrHRPH`b*%FZCͦHt|bH{$w!>LȽ߆{RÌݹț;^} ;K GnY):a`Q;tVCXJ%͖k۰ZiX}t}q8{8 AD$Wb'hxY'_Su޺oo \1\AzތV!㙶gzgYb.&VG)Hbm4] 6eBH"5DǡCl<{9)SDDGD(Ar3ûWˢ d #}aK _-@zW_G>n,O̿LgK4|r5_9Y~Y< /6Rf'izޒ/?哟&_F4cr)ƭoߗEwZ/< J fn*/jG%_c}\Mq pssj=/nx]8M\~9l>_W/_Y=tv>ft9ͿvI9t[0=Fxn˓ѢPyr} Z!]U&O.lY;HN& a]Hi3ɊH ` )W)hERvQ.]x}xfHW^O @Z5 lDb)oT:XeLo)Ϸ{u`svN6'ȴJl1_.AVay\X޻[Vb׫rmՀݟ=[#aS3a#!mL(;j\Tw<$0դHM9v {UwD#T]]ICNW]Dm9y9Q6`m=:{i ֢C!E{0,f?-XZ颬7C?Q/3DasVxp]u$Z2a> 9Ks7%nF;%Z~?gdkvv8\6Rl*~ d48,?PЅNљ./Ll1?M<#<˜N9S` )0c(kWJ4zuXӇoxV* ꩅYvSqߣO/9h},q/NCA.Yͯsj뷕~aROpv{麶F]wE^L,3cQK,a-VAx홰>R;o _OGuv9wYM]:qOw뒅B\MuZg䭧*('+.3<\\mE,IhVZ$3ּbzu&&qkCۃ?7Ŝގ\>Owi;9jS/V.Iclpc\ٻڣWx-sp(AԦ\~yHġR~'$L|{nMz`Vކ,)ؔek& cp9A7s bJ,Ƀx z#ҥgB_Y ;g,UChŒ2 EJYbs҈|uzq 5!{[5~낯<Y&CXgڬjB@& I8շhTmGqLI=3Cv {P8 .&성VXȾ'B̮Ț!YP9;e9E-e]x4lT&U,1'bVĘ ص.9"vtCMl]YmJ9  ke\R0g2˗k'. 
ZTGh+\[2hk+T}(B'`"eOULMC?)vwQ뽆NSt:p1/^'Z 8 &-b%_g'[d`1“)%ƪԮ7xtƇ}0)vhIB%&i:B-8,;9~Q^…°l"zͤA)&ҜGQ&,X6EJf ,3dXL֔X'ۗbmλ?&4߷&(m+q9X4耦8DxF J!= &Z1ZCIyv575q"/B/)8rtb0Os,[GlQ |39G|I\&"WP5,acS^] >Z}YGvtmf&=@/B!*44 3nվ¼a~H2R?|]tu` IZRc2+]c ez:ِCe1,[+Y5AH]%HԆْ|wg11$ۗi>L+ݮsE9gCM+kE '1ؤiٺ7șk#CL#kb/eOz.+ɔP Ea&.5r<~c*9(.4+g,nT͍ P['@(CP%.>Xn4j2q:,+ox[VNEz*c-֕kGWaU;iI5X]mTNXQ1u=fv̿la(>",9WZg( (ebl'mJzࡆݝ  F Vmqm#[ 2KuegQ_V19R EcMUZ 6If9kO!dш̷1؞}*,frm@;EFg6cή꫿h*%JgkHqJ:k5obf؆JfZWЩPuwU-Fg@`:V3,CRŪ6 Ebᢉf vQo`X1(m IFV: ˚iq?(PR@ǣd '{ ukGt-ZjYLa-6Bّ#4.<--g)otGLdd.Hy! pxҤ>@Qkgyצq^Ӫ4e\x]"5g+*@N2_~$/= ׿WGΧixLZ;"/-,7M*Cj4eVvkE~/ l9.YxMcWK/o|XyK`..ޒ< s(z?Q'qoi3Zd1.'JslpG xbܗ!lOz9Zuu-N7J83k/. z^(}H}$7Ӧ<J8e DXJ%42j{zV?'*0dѡ4WS״KnvMXwٽ<~{a49&WSfBmৼ\# Wv T+~z?,)o;D}ĻS34rȧ5 p]+P-ETްMvA E]jBLa,;5N#"kO ƛj.]P/:؊Ufm!!@M=s&pf F qp)Pȅ 4wχ$}-<e7 ELIcl\2XR00Q.ĭ4ny& <[+RIgǺ3է{ $~[fl7!m_DFG.`L^:n}r'eKm N!Xdj}L|¤3'%qs&@+ҚQe>.R29y ΆuaMܦƄ Mq#~{ƙUl/I'koi~7F<}]P-XE >{ :xDЊFJ|3GrveuJAۂ@6b7)㫖g=v|W㞚ӡ]Nw'bNxW,(cXR9 rqZ$~ ѷ[Ǡ]BABxrITAYۈ54n]bѸ,E' ʂ!3?FqKΞ3pi^ elȌP2ݣ[ vSXx/ |~/v.L >!x&MwStZF?V4eċd!S<[Mp|\pM {^NGɽc2U0fV r(t4'agµŪwge`ܢpɵ"1Ƶ4a <$v6G&4^<{ O8k3/x%j AR@έ9ŏ lRE6 l /m/mޙ)JH,%+Xr.B-TɶC/z|T" 6o쫼|NeضÁbLop^UdhhpXZ2_\nպ(WQRW #?Gk}}d!Ō!MյH L<[LmsprŕdCInCuDd m-^6ebnWkO&ZcSG' T삇:2l8/8c цu7pF1NETߛ} (aD/!d]vdZ_Sh#E皽u֩k-ر,(aa: ؝ͨCOI">aٮUq`p^Ɓ^u }3P}@WaaZ:p{﫳_s8Y"WܪS*T]T1@1)ia>C' osjFg]t >M@El-N |ut&Κ,sGLN<%9[\W3d9_-*|I [,GE͵bgHуo?4xDoͅ`mW?¼GqAAez=dqNFXZ֧u'9YIȔc☟ɻ~BކNDDxb'ⷖ3_%bkes;{M#r!f S>U\O()Ob8ǡ5P- "i%X0׀߻OH3_o>E/|Iz;.[gMI"ZձqEH Jֱ8dKV^6%Iix\HyVo8Z `ߴZޓZM{aIHśϒ>L&ޕ.=X=ˋ`)& {Ϊek@31Y0tU(j**Ү1Dl&N,~[kjNh7[ny>׳lDHj" ji(=}?,bwbW/gbfT?K&dN^̕cj8 E*uU0{MŒLEya2NBaB>yaU:Ò֖3pE1S@xժcZ |j\uJ$OAMRկ(١'Xڿ֖N>uG>4Q}Ưҳm>iv_X6P'gu4JQ:VDS]{HJ:_N;U=&?= 7No.*ܷ"(ҿY xn?}4V6= ,2D^'5OsPW7":{F'[7цbfM집dz̲W˳BZVIjHʛ{(Wy&`q:{K? c!g#k͘LKb S.LaK´؇b0, ̭ 3V2uUGh[K-O̦$e;<ϻ2vDۂ<b@/$zkV#( +6)ȪbKk!U"1j6!luRCmS(C3:`Jߛܓb (J0cCX%loxڡ5>hB!Hpr/?#*ooGh"e[N$V4EOPZzRy:'RbV&3od^ gM#$0D_&N4ky{=@.LK*8aQT)Q6YbBLYOr 7EI=$UVtjB7b!g{,l&^aNX94eW3ɑ]чTZDm]Ӊ曋+n&P(oRi൯eZ$F^84KId몢djX0W:n`,lsTM1v"|u5;G5EHѵgJeXӥEwzVK 5M#F={[=@3PIDY}C)QAbvc0f>~OjQp;>@]6 ^r<زGSM9]kIjJby{;3-S;MN5}\5U[߾ ;dEy-y`R Ms|lX&8o-P Ġ-  Qh8y@̋ 7ΦVOm'CDujEFhIh\#Nlk!^| xGQ cel39al15_2MVJ-֒!0Nmᬛhǔ:Uv*D~˅] $28Tշg`kL.ӊFٝ#;E4?c!ZOb\*tqC,tؠ6hd@[85z^JJ.i1M#l^)Drm``u}>-})Zip>9-*~{~<1='"L룿S>=|?79;8ًSqMfvu-%ճ J'}6Gm6'gz?I•yK2;jX37v0[@,qz? v\ݜ=]ORV|+Rg_/'Žggvz9m \/}"hXxd&*X#wikR7~XQʧZ).Zer_v~AO1^m ^^\\ Z{1/0Ú"DdL2$>7(!ׅ1Z M󕬎kTKb# gBhEG'Am@6'MBY}_` PQXp$vf ,ф~o2o:z\#D 2R=Q 2n %Z.z ؼCV߆h1MUqP,GZqbKӀyY5FsP4 )T Y3kF]5B'vИRM"{]{F\w%n A 2OyNey%Q,CN!Bf$7_n7:/ׅ=J&ǔϗGslǾjOy??0l'am ꢩ|Q~꧞(VϋwG;`rzV_;9\Iu9ɍ?ɜz:Ů.~^'5ݜ1M^fOw8#@Ct~XX? ax<|FQ)L~?ǽflT([XyB˹i`^)NyE0nM']vPg?nG.TU!X0Xf+Ѫ*ٷmJ誷mt! N-#F4u> D-rRH'3a:. 
1^;w'}˛Jg.B~ m̆G썛PoJ A˦ @6cu|ct1& 00)l:y  6B&1m l@_\o.z:L${K]L>T3Ts:eȓL9ٛ=oaOxn:k*|sz&-Y>@3qa]t{kS-e[ 0+ikYa.apѻ;i6+gs%`8%|HzvFN{z[̎-Ckwuf}eW1Dª]|zy)_q|,Hr>=-a;G m/O5G ?-tѾl]TL6A1 ,.ԿێS s D܃⪷ ri ="ה'Ϟ#VA| ,nJYV$LqvͻVGy<-xXBl@{W A!zo"61fߘ[SvГ;5) L%w{m\&+7I k;) #(l<zƔٌ)/s1[6D-P&H`]Sb\hjFmww[ŃF# A|廨n.Yp90v}x} ߾5_|5A䬝ԇ'?}tz~kfwG77ίo"8Ms>_M$ͧ˜]&廣O/o.{խ*q`BP|'s?ٚIXմ/Co:Q^5NI41V$`s7[,4_,d !uaܗ>A %\lYJ4%o<ѲfʌLm?1Jmfp16j~+JD1*!̞U>Z P䱄 á-VpFoV栈cё5e,&ec2R{&7&2&D6XHK6UZSZ:I^j,{,.VKCk 6+bٻB1eYĪi0v>0 S^Xf3lA1һبL~ R;3r=q]g\W*[)_#1bUWzgJ\Zf!ڮI:;TCt 8Ӡ#V[+COmF[(6썉V1>Zd+ 65Ϫ\ؐn t[R]2:REX,x^}{k {7Ȟ6-K(ZuJ[Cǖ\_Cpef8G닳?HZɭYqY0aWwwwƊI5A2" '-˷\ȶV&V/u}z}ݪZ6W+UdwN]R:r"ciG  gtMxܚ@trޥZmׂ7겉NF>`C*f`0 6wmހe-Ŗˉ^Om+H/kuq]h9unNJ#*lJ譢h|Fڱ[ahzcVOB]ƆX]35zѶQ8;f89#㰠CB-r?/!{@YBbJT]0ΖGֱIv7]Ǎ`gq҈"%k%թYXr2]P!ڙ,F3mߚo+zEN*h)91TG9e%.:N]^\lq疫 t!UǛ>W1lIy>vib>ʶgUSnz}u+.T46M6 t83g{8W 8Ab߇onscaO8?YbrVW%x[x[j Scꏕ2~jZ φ^f x|+mфcN|/|ImJ+$l ~2n.D"YX{承oΛ?!}v*aI1XLyPÂ>un餞āA_E._!86w'BrtlBňB8(JݡׁB2 HO׋(L "Hṓܡf:dL̀dF{;琢@H\(!A6@x`hT0Ǫ"@TEzbw窹B9g\J5\z,>9€5>^=L0zU]_wb┛ eVՠIzE泬k(=k@T8Wob]jg>|ʉƷs p]_Ve=ir7Ƹ=I5ڜJ}<48hL׃YW0!gd0IQOTi;9k F+o^saBaDO#4i3O;!C^X)! tؚ|~pndnQ^,7`mOkQ se ai&/KX M݂upy ٝeNkafY;GQy-ꀩS?3H,R.MbNNUKB)XcFksk) GLlRWd˵-g* >Y[c,iݹu,`ۧ(Qzƈ^Fk8+nAH9[I%P92 ^F̻)gKo((㴇cY[_%`}0DR*b8DzX`'# Pұ7Ďx%l},-a=9AQeˠqǃ4E> >G8hi%ldE@Ac@>|-~x-EjA20> TJKKSXhX6! N @8Q@>kLB wy;tN= zH# >&5%BqJ6}h+l$@C\0qG::&;\J:ҡMFC"_SAy&0bcY\#^u94 m}q{H @2j nEE@3»!j vm\GK8.#ZsRIicDJx2R͈sncO4 bpJΏϽvw`?Xu@y֚q }?]JϪ~y`Ff4d3$q ,vg7^j&aLm]O7[ .4VpOk{E; _aO+y6'ӻ?Y.Gmo= yE<fr΀&Àhj|/.>C۷)C*|gC׵3?@wojkk nStq5X20Ý&=y_rXO̍3`7垍;qUye\vRvz;ʪ(]K y`$y0ZA_Ijrh~tE,-OO`ʟ%fB OV &3fxTC( p=5$sB7OEIECВ͓^9#p94n{d aֈRrDCRX#/`N!mE {R'^ĪJU ދJ%4hi( 7*svvo+2҉n Eٶ~V-7OF tfV5#(Nvz PFCɟBB?p@b+B7% ɐ ]_\hszK36)/!+F7EDqvhd+#Fxn ˫,.2hqbVf(0\w! Q$@i#Cyp 䆓%Pg#R* f4)| f7vn,Ì:Ckskkbj%p!ofb%|h ZjQFWEfZt2d?hm!,bTѵ={26QI":eq3;mSAG*H8&̝8px=>IX,G3^f׫aklG*)/#\t:P?VTz (`1:dHZ$<*S#VT"Xp@^x嘲 l'%U?GJɽcj$WRk 2c>wڝgN8)QuI;#`yt m21jcJ)"m(rs$$ym FSGBmjћ ?*<.?Ȇan}5(/+-W)J;pbN-tx^F>.dyq祽 EW?_ncsɗxV\9)IHKRK&2c -=psNR!gA Bsu)£2׾m.=2V^v1)\[4ޟwGLrba^$"a#DQJy"LMU 5ed%\?{Gr] A[[n=A`l>ŻXb+*hc$n%1 ,Etܾsϩ 8Lۢly . SpF'ܕ&jf2wK*Ñ{CYCcupl<%Er1[7G]ރL8N\b/h˅ydlEnkc=%:1s mj; E廛Bp6%'e &Cp(KX-ωZfۦPݵ,y+(;Zz[arMw2RtxZq9jl]asM~q . 5筸SU#j̒)'TLrEX^ϯ;ȕ9CS &LRmgS0|+7e!ƕv*  Ƽ(<uoE`%)G4XE@T#@n:8o9KbtB4ܨh 3 a&>JJQ Mp4`^s;qCv,S dyTN>RW硫EySRvIUd:*IꢸG0V{Nv! v,'i5OT R$ _g{Glx 1bI' -L5l|+p5qֵm9qA#!GQ6Ʃ\^Ԉ=Z9ڋ#&ߊ&c؎nB98Qdb.,<]v;߭2XùnvPWNoyPrٮjKw#lS3ņ$ 6x>fTfm`%.s^c9]:Bfj̈́R.f%},+e'gN)߷Oq^xVO>4SujgIޝ'p³ ՙ.&r6S9v,;hڟA[e*9FݤMґl'2q )x`2LsUv<rsdʸ]H+1(zO=id%C :Zn4354;Y /ا%JXBXEݘ#Y&zʋT|w=x'J V(M#S[<}{"My#.W=BM죺[z{@4:C..Nc/0; 뷷yyqۗ/1j6Gڞ_8˝>r*?uzټٟ|8ۏxծ}|0a242O?UKכWN[$t بN|{7>|sd9Y Z}\[ocI`x;s܉<ַȩOvxqO/͞|;%>}:8m~|Oq%Ñ}+7u^ovq'~,4[ޯ^Eۓ _oo~}uw#F<{$ϯˬ=X-RN6k; Kjȭю߷A)֫0_r,|~2¿F H[ڼD{L'/ogyvN>ޓx^G֜5N޾i1{|W $ѻ=nnv4bI1~{sn?#&jwG9~p=WӣNAgTCl}gt}}3:v6]_r8=x!zn?C wa, Dj1_-c? r7|͟#>WzXw޾|sH }{63=pI[`:$;뛋,,1'e>O5}`7 A#2v?oӻonN4-,Hw b_ݔ7/ MC{],cG/Ʈ+[{m,װql>(@3RYL߼_m& legٍw}(\cTU j4rҍo*T RJeSMUƲD$\O~Vwܻ!$I!7C([NTgR$c̦~sYg>2޸/u;}x>hf`5rIThzn2ZcuU]fΏއLEZLNVe5gL/< ܻh(p7^DX|>&u%NfRi8 ! DtȴncBN<2ׁg!"a6gZzz*fS4E{P1D\ ڜ[Ţ5uoAIۼ -E*}- Z-h.7ؘ< 6ۈ蓞4hX1:kWUФ Z;ZiA1Y`ѠWEyyuXͣn Sڟ6u>tfHЪV "#~`*:4N)+xBS@0(Քwt$*@=P[] Fre"H K9õя 6ЌGP vH+ wNe>)Xo <].t[y#yH5:PDFH&6𤜢J \ 7 4 :`s˚K3>}qc RV~=PhMcO `]ػ @e1u:O NF9X[Ӡn%GP buA"ݎ  2tBBɮK2y0XܹI p$Ml=q][Oc,F5,-{GYchvYJ *P  Jxu5~ [`9ZHBhj7ֳE: TL6u4=0.IscՐ5q. 
7=88[dcPZ+@9g S!@MAqych$Z`eA "ӁHR{hn :,ʍY-Ҽu*H>vH@m4 }ûɗ$-gHcs݀A;گ ƠY0%e8MAvkP H{y1y[sc9sl]Q;Q5Tcv<Ρ(,zW.P _AآkG-U c j-| sCAltWGADf;xzou[j2uI@vnf $0pD r`f:tB\{V#"u_ K1yٙQȉ,9l'/Kuԉ[.In .LJ,Yŏ\tLY[)"Ģ[(,ib8UaJ$֌c#Hs>xh{ 2q͎#e9S%fSqhɴl#gp? 0G^'lca 5[_1y1DO"RM?ٶ~$< ѠX1c$aV<5zˌ@\`E͢?jv;|¨ͨb3N^RueR \pvU`-y p^ui^s8P$gŪtsY' &ݘ)ZP^ 33Zlӭ|Q.ϭL# >V[ܲT?ɆxߦUAhUc%)L&~un闻6KztqFv̾]gf^nS./ g*yڮ߭ tvxټ]i1o_y2Zײ$t#g8m,-/Qʶf9&LQƥhYsz_jN w¼e |v0-Jj8,%Я5pXJz#^<6;H֟òkGqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXa38,'aapc]j8,'7/j垚8?!ZIqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEk^79Ki.y--:a]j3rXFaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqX?rx=rXhB5j8,'!=pX"OaiМ8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"(=`&?4I6ڰNzt|CfK~SwfG̛\*'YtTz|mƁ7M尾 ldX+V>^_s-fWe߿DK9۷?ۑٜ4c']UͿяeg%ehx02Mx ;)c=ǧ}\d1:J>آ ̢OVrYޜw,w}XC$0RPK̠s`kʼnX'm6du Oj=<;g6 =9ɖh9>/=4Js:j72tGa4-ٴ|@5:ћ`sI%g4|{'soT?GSדe|Uv qE &4nYsVуM74qT U9yH:>qLou W&L]IMPq}`񞽯Ӈ@׬Poy|}z{`769TKB#`q6ѷu;8-V^C\xU[k4x~y;h1=&x4?|$}ҧzSm5 K`AV<8zo|܋?qhk.E(ɱ;o,7U1Nl(`6U}6'|ax"ZNmJ}X&ː>N()b00);Yuc4ޛEOyZ!a@ǝx@jgs;l| :\7V:8&]b=fa!k:1^\O?|mͯ&51uI\&xc`I >kzC ']#{k28*E@A"͍v=)@ׂwzbʃ@^,cRf.Ұkkgo뒙hp"NBN2y5|=VxAns#&\3b4,nLf-ΒCJms[A ƀ҂+03蝲viXkZDV4x p>=x m;1X7h#:C\8M$X]NscS 8p,t|~ih3,ѹlpJ+z`Xe͜B϶>ޖȦɼ:!%p$:vF)Q2:Ik6w$X\ xl+<^ppuV V]7w׋qkxXWu)Yt)^}%gǫ:~+# M~6ՇrX%ONG )U5RQo-9ͣѰWh \zxU{={eYXI@ ԎFaeӢ.}G;B{n"iӎ\3peOپTa'tثD|=Ij%S]˞J0}?pHd8D_ʵEZKۊQ;m:ea^ύ/Pu*.]ZM|+Ï[rVk =༰SRR ^Ym 5։Th0Y3-5_וرKh-RЀQ8/CJ6aPĴ=ad_\f 3k ) 7TMF ώ.rc ;cCh !7\y40үX]ӌY}7 }ynK?O{'_E݄ {; &:F9kϡq]Y'Ka55RK=h7Ayg&k8y'k.pWӢm7tq;ɷ>{xxvAC8#X)Mg7}GZ"Hs.u d 9Ka^1;YHa>385TʝLۈC,w} I-qa_u˟=dG2;yow+V%I.}n[=SgZ30WCG2 1(`bNv/Z?tRIhyz/X%AcZ"Ew%*hP5#uhGۼX!o^u#tH{-b'5b䬋Zs1]2`拗&ȋމWr%A_gm$ݸ\3k-2~ئtjb.H+kA6D\4*9% @&NX?1ci)hPXI^5/")lR"IP\!SSiiCG*- pV61F/MPaI¬>}x1Z\r e\g yVP 8ABAV&Mqo Na)+6Ԛh0H_qSgl>D㙒zIq"\*X FR6(| ЂzmVKe~;E=9؉56lT$@Tߌ @HIn Ҷcl۵SWC$r)+9 XEIH ASVYUHZ%qґD).ɒaQmکmӋgIH`6 \VnK^qN)[8JI<UmOTL~#CN im\i^\*Yu:ZVOϺ5g/f \θr/^KV ٭w%voC 4y88RČ<$)cW:p~Ѷ-2wߵ9,gQ}Dօz0|=Չo2gWGof#-mJFbJv$5#Ukɩ,}Oʢ@4gSK`-JѼ&h\=r(rWJ@m;d +R&VLJ AS@" `i2"],]AGh(8 *#m5IXF)㮨qtq&GXj$ dM#C08pQV*2qĩӌ0u|N?? ޮPKOd|ъ'Y9Sc?ƇY!htD0ZVG7uER o*]ga_~$^5 &Wb}6*ċxt:&Msb8  8 Bp{ 4΁h^hg2o6ˁt`#+Pם|_۶?%,4k&K9=Ml $C*95|ɸLF_Џqx5^ ϸt\`@!Ns[)ztnVs!ܑܿI&L%prRkDurݬR :Rţ{VvK'=Mx葇MK)Y~Wr8;k:!΢-@=c\e9Zo="ŒxZD,ŏU4`B6&k6{5^7M c:y=yHM<^N=!E.!ECS0@1"ަhо%\oF+3P`vOED;O&Zdg|J)U^TK'GڮZ!8X~@:J'\8HJuôJ: A;AM1 5;Kx/'0؀2qWLF??1[1eʶokԦ'\tvOc_uܗ2Nճw_i4g5tȕ}08a5f:R5 YWQ--p=8LC)!92;[* P˧i5b8!OU' S1 H}*U,jU}ݤEK1?_fmW 4U#Ϸ?i_n3\ue]  :hӄs܇(qq܂$aYYN)OAjՇ  LyIIˠ5MSιn?n3LeNb_VZ/$HVk &iOy$>WTDQLFx%̓uQ EQşUB6y}nYÜ'"X4!A\ܼJ %l& ;f\pung8n`lB%vk:[}%K~p];If;uMNJfk³-locZ"~54Ȋ؆4&;׹P+a0p@e\io]^H h2{bG%SQ#o̦D(!}ݰ7L0+ ꦗv| ͯf A0 FܵD\4*9& Txl,H?!1šL"c*n.5ct *!V8JO`RhG&\$+_SwWf{vYD57+$CPiΘ-b!;zGEX VכQ…K.m]\?=JpC%r![f*9\|I40V~ݼhp~nJ@ZMBC,( `(7؂ gS ,"y=-8p!1sȹ. E@a=jE q "2Apm>)YTDuwh"iC̡imVd0): Iͱu*RNY2} =>ơ6EB"iӜmN ۢ NtHfѩˊsG2xY.(:mb:2^?{\\y!p#rG`C6r6 9(KߡC76( Ȁch@FqXL4"vq<'w\b*N93"L1 Wf%g(oL6.8 * n5IXF)Zk?3{)0w:o"l& 7Oy7Z-p:{:#3ZU[ڴyRf7oތmb4,?s*qJ`4F.QXD%/埫ۑc8o&i~Y^Հl*,mRcbooa]pۗ u9o?H.;_,El3 T" -_0s&b=<?.onÏz}5<Γ-BzOKcW6?a@?˗cm=1&j"s QIZ98IB'ei%"#b\_L:kM~xy0X*4 Gk<Aeh">1ITل JLiLg~̗{e2ټCss͝]4 +I~z]O^޺Z7w44~){Jh%^pJLXzRntR0 *qH,ׅ"t{1?s{wd"~ʂbb|Sȹcsrv~__d|y3dyx|zd<ѕ77CXC2og ץwsKn$c.r7բ^`\K?W H،> Λ҅?KsǦ ·BT^b|e{+.:F]}.+p?7ZۻhyΗZ.Fl-݀w^CΚi0tco&=n uwЂZ˂6Rȱ{-atY"Ն'e^%}i"mcDZ緳ك8U`0;h=ዻ+7&yhѡq RFTU<ȅ`ڭ<rQ(З_ gI}`z,&9%iiB%L,`T,ӣllE1<ȣM20pA*U 8LD) |τ% \ag|RZ^'vEۇl9. 
0S\#TszW i dБ0[nf ζ\9ȧ2ېA-P.nľȎd{7%%5-GGVt.%3y<ϯvS8?[6ufִ>ܤnKMݓMZ Φpsz_%[G9 חW<8OZb ཛw`C 7t.N?ج"Z qoΪz]-WVW܎N q}:jyq,vNLr]۷t1RV鬗ɒadiѳs5U02xY!-+muafp +^]uzOCQ,֎&c=0su7k<2+FMZtzI-QD.me!z9̑SwgR6k|锱J3/M rstizsIZkS v}NYI1ʬ\*,E7d]-c ?^/ 'Տ A fm,Ի@!]_@$T) V k*JfCD2"yFNvбCZT 4LҫVj%XLu:FC˝SxZ^,aGeZ;ml:SEVnid{j[)V#}]cdݓF#-5tUϚOȁBv$8}CT&*2϶#-F/N{SSebu{(k4ʼnSV`*Nli*pIIG!64"!V*ˏB65bnD7S;[Cȧi.#,ηίH`=2&qrɒ4ƈoIa%ΉU_IK|\$_N.g}G"hWKS6z(gXax,$XR , q#A5bu\2`N)x(hҬY"V#]N@:F]88NpuaʦƩʞXXalj?#^ փΖH=#$!"kg:~lݩ_#wmȞ;ky*|*02S,m񤒒P{kՙ"#/"W`htrA1;Ed̚"iutnK1ZX!H_9ի^-%|rtǐE٬"YnS||kˆ9}s]_->߬Il9f-a\1CmibzӌӛV^Mo~>*oz2+e;z[O%6zݜ*]]>˅Ӱή+X#˰5_䍯񬤘~2{?t>,KQ6v~Awp߯58; qf7{{0Zazg\җtVng/IZg)%N>v^pM E"[L5-oCƱ @wsnD"K M,*KY;} "/ *O!!I}s%R66gNO۝{⏠;7)O WM lCY֭:kd7}ݼAvФ 5ZQ_T&*X0^eNC6-[Yrt% .¶>_.ɪZ!uͮQ'$b@8J` BHWԔ}E܏t agGe$ᓽ" 9 (JL(t݄Kuз!;775^X{B - D+OfwصvcsJ:ibN"UBY':V+x0ݫC*!8"2+i s@ʱd4`KhI%cKHUP1Y)Z}qݻg->HuN{"]/wT&v8B .48߇`ų6#c&F;OHg)oVJ57vZ4Tt`L`L1R G2Lւ'Do|g ^b:eT:ׇ8tVf"q)S<hrY"E)ɷQHP1`ԶȂ}X_ 4OlXIN9ZG9.93I @VE (tKY21[+d-Y`ػ*@9i TMMe`69cik4Ts+// Zv{'{ѠWi@brq)#rpEXL$i%!(m&NվV}8׳,:Af |$gOK(%,JH [pig[{nūޚ??+LǪ1gJf Ufkt:*1*QgM']s6 J|9 J#~^P6'rvqCr5mI$8;">R 'B'l1@9$An%kEb/3wyz^f-3 ?K;o,r\;}\OdΘusV䐓:*pRSkK­B䟦Tnav>G-iোf:q9R\]կ+m /l$Z~0BkG`k= ފ *a4*j0gcZe6h-_̒OߓHQU{N2`.j~(-[[uԋO=xGOܳ|v[Uڍl]pNOt+7x- 9/_~Ÿ[('u5Xd;9^hU|:F]c¡K.yI|d:Qdfo $4rFBwd -9'-pTh"+~K}!L*>]o C?+~;a|P'(~gunMi))!>g&z8inDI(-lA6Ei<_:dӣnE-%$V;Lʘ5Ddh%rƅUXK60.j[jM7zBOqP'=/uo!U`TYzŬ"CEh~$Y#]nуCZ#e#QFIRVeKP%Ml;Y6n۸qœfиtjxwsE GTilC_xZGLŲ쩀=%@.GMg-^:ssuq4Υ~jZZӋ:oZ~b9sսt/HfQV/CY&Yb6qRcWXߤ}V wCS+.O.*.[ wy'J6ؿYj޿YoKc,s2_޿1H2I .lA_Af)zs'[Yeӗ 2_ *XrP#^:B}PG$!./oЭ U đi,]i7mAj!*r;=:m8CTh8\)ijB*pW PpuسyhxJujCQbȧ#n1l6O /LV~7% W(ʔoF+k(v6N{>5:Pvo]PmM;>^|1bП9BDrqN2$/}z,-?is $?e/ ⚬!;udIsgjp4aY}L}I-^ʽަtZ gcikUUa<>T\rg!G+8 jV񔼠U8nD'=kh@"^*zxݳ>-ǍB-V4[Sf3f~v mx" g&Yc2;Nz^U :zc* )'0XwZ+Rg+%Ť]@7Vg h *HEt:H%h^c P11OۭQ@ d` XD%9 BL\ cck[S:!E.xc5̔ !$V٤D\~1% k % <=^sQ5f^jă:-c$\AUЊASZZuDSq'#9$BN'ae,.LUelpkúOF8'J&B4(4N4WX& HGz!+5hOS.샺}jS=~ppEqrP(OVyS*YR55OO?u$-r/G\*m"M3x,D(HGd8Fn28*JV.iZ&+Ww"(yU Nb/wwqCШy!9v_={5z%ʛ:1ަlHW$!,+e%@Pg9Q 2FzKmXU%0'“][K@=ar&,;C$gK+#aBN֕){l8MqΧ9jΑjv x K~Q]1|WepmxȊ0E!Vi6PC6u +2C,=fZn*lEIxI%rŬaj2bi7*ʲDQ.mZ,1YȨդ ?q+ )CYԘĊbyiΫ$S3Hd~yS1%JɄ&&IVЎܡKull-lݵJ _+b7od8l-:d>?)KٕfNC{5钆>=;b 25Avl>CoRzbmZ&\@ s'J ϕZWԋӕ⋵%(Ԟ:Gݘuk12Vv*hT3,>XSuƓ%\ʚ"(j"]wz0xxwie(0 s$1ˍ&Ah3#2=IN;a>%,G {&uU|>PmuQ4w! 0::YUU朿PD+ޟ//´KNCs )drQTj&_]Z\kep-{I+]Y9:Q]uexdaє*c>@Pˣj&K䢴@V% . K-#sBc"kuxE*7Krr^Pca` sTҲ7Eu[FOdTcCZ[T{YG[rV<E 'ΡKfAZV a|N7iTAQ8E%M4!3^~C'wwuM4 Gv$DJT'KʆE:I8@;ֶtNHafĺH4Q$j\yrM]LJFxH]f#cTj>Ե(?%&T8`, c.*^!hUoD-{RF kDB_hԲ*2![MUK\&s\DEjXV0$x,h֯Jf>6%52C;aGoBv]HX$%܎*)9ીCaR!lS UtHte,Vɤes"}s#˼GitHi삖וU2W2=Tn>'J:cV2#]:CYoQzZVa2㿧2¿|/KOb}8:SY2by46F_,9aݦVlm+PFq1ICRW*MquBh+go7|U!Ml<]bݵ_lv)~G2ytjd|3.?O 5\`>\U ;?{WwI|U\?d3ZPu>")}F <^!dx sxb IEfpȋok߱pq(2½nnaw:"?׭)CYaBkٛ˴pLW>6GnƱ(eMsJL +At>) \7ݝ t3oq Wj*iz7oy櫧|xC7Nq% we8P/]9'Z{N"Tߡp}*/7z~6z?Vk.6{,LG '_bN6h:ssyw\$BIFeL}SmER SG5ʣ)s8_{7,N|! dRqP29؜r@ɘ Wf@"Cέ`ZcD7ӣSd%uEr vB5E޶@/jn.9-j)N. 
-rd޿:i޻qi:C EjԌ5%Bt50.OQz*)T@gglhg^nک@;}㿿E> :󯋉@ȱ!p `B)BB,HYT[Z\0:)U# q;qS=΄fާ7VnH=7&"MG [15ht={goe730E%طAr4 nv\YPCs3|i?t͡ybmbp upMqF ;84kl$nb ^SE)*zx6@'*S/p[S$I'3ng-ͅ]yCåRkcX Ő|Ƥ=2 A\( Rks( |D‚6K3CsYAj&r<I[p}Ź&,PJh]`(r6 |U<ˆj|7^1pge-˞8+ %JYia}8,C{ ׎9DܖcR)'o=;,KSeBJ\务âg(WR2#uR1/{H!gŗ]8l9 ܭ1h+#[ZɏqSl=Rۖ}b쏬 ,C!g!ʨЋaJ _WcC7+IT/[.?>F@@fu }u$"A94ɛ b9VYe}pcg#[ǭ%ip|#޶zӏ]i OY)`6@-Oխq5Sw]J0{Ka8 Er 5hۜkd!{ T&vhpW\ ٻv6؈v'ǝ<%4ZZd#@Jٖ̍;&bd7-MN+jY³F`J( ];s\`aZU#mwf!9 c!l[o߮7ϓA`V*i!h@!6Xi9GY3\Ȥ!ǎMK`>uȉ,g Hٹs}5_8*%rKv~ ֊ښkr9Y;Ynx|ݚEG:ȝ8X6s=,f{:J|C3o'X1QɢQk #D/?6RrLru2ԑ&8Ƴdtʃ;,}P﹣e M޼s @xe m3.jcbFD\t 챌BCл)i18&)!tRsIDr1$nA-h:e4>7{Ȭl+;r&P҈MDi-ʊ M^YVl9+Dϓf1Lе܃(QrI)Fj뉍`4>mo&-&;šg;.J\Yx\&ް]/?6Z0bp:hqI' Zeak(ҲN{Q58Q y #e2ZHet{-Qe"=[v[][S9+B[ ia+[3 |5z;jC1+b֍AS7aLIu}>O͛?>' F ,J6WϙuEPj Nd2;k8{5fVGW-fc,,B>r ~K)3uI~1/!IcEٽx&gwŋFn +WY\H*etHD*=)hykX &FCXD5w^h` G/EFYYNRHڎ>VĐN90_6F*cpcjL4I4d`yw̞Vy/iUK#,Uh:ea2>TrGL*xDžTiXMxH~}]R>ήvPjrpPïvpЏGT -YS/bcXp+sO?Al2Q ]{xϨ;}k/. -O짎Z\ }:du )~T?NW8ŸO&&H60~&_)LU$mhŐj닞$q+c'կ1y}26AՌ/ɸ\,@..zݠ;ij~Sa">Glz}6!ZP S ZU&/xiLJ FNEܥ)X9HQ'rԽFrn2XڬH}Lb܇T炬c TM_hL)S)uo]W+vAHsm[!ң3.(#*K!'c]$=s"R oYp6D^6 SkAEOYYRLD1KKgH}F-=C 9#!Fe#!>Pݯ!cyft9L>>FyNEYG'g Pg?rjdq8% `{&f&1s2rM 4A#uI q燖`ʙó]u@.k?@&3ƛ8ΖM(F60-֨h`%H0YFLuBe [&d!di%9!ZҜ SFGb>ܐ9EE m`+ {jt]6D=hNԃ:UTѣMV׹ax7%ymɏy`pz t,c!u1LPGF^()UM|9fL8A"Ӥ''-PlٍfFK|%&6tND %j+ޮ 0syLndFkZ ١VrLUHeVfȢL:Lٕl;̽wMG`?XuD6M}ayUlg{lHK䎦3[;/M +Xw4VtDzח\q=Ah)&< mDr@dtT1 ,%hfIIc&K&A\$Mz|j-ޔ"12Z:GR1=ܣTSA- >HB##'<%CP;/N>a=CY%8o96z lVJy( )99X`ZP1g>d-&9<( V4+)AH]dgg=g@dST AKk 4Qi Vu%99c:H%)òLPY;,=KOQrh9ݎ?LA#il闵bb̓eJJ4%m%ڇgBZb[1  wORTNc.Ùx** ѨxѦ`UϪe8ó[ WlIoǩMyVdɾo248pl"jۣ3|>~N?/{)FkEQ;*[y}sM'?4,+26*:ҬYOzE1dz`gގ8\|vւ֏[* NK=̹gMٽ]Zܒ~bԷKN-O}xJǭy8?s~xBD t"IM H[YΜ"’l}GZ!=[9e8Ns$N1WZ!5͔ٻ޸rW~J&Yś$@& y Xk,KZ] S<ݺ%J?XR.Y/T9s\",1B87K zj[xvI}pQqa6hY$6whhWĜLg&|3Q&FӞYNS><շR뀔'XW>UQT)5lj7#sJ[;M{@6{{Qr1ج =l jJ͉#4m}?T^nq2/͌2ք2M?+}z,4P_?G6WY*SRd~ϭ%u<\g&יufrMsŚE6؂uoYl2Ya^EWXB>Ԙ+7c渗IěO/śs̛\o lXssͻf)T:Ǭ۲,JIzlGt5.95"L`Sv,AIgi=[5f+HUK7/>>th7F'xr] rDc@;Lɼ_UdB-h}P!zT_@˹lu&-)iO"ܪCn!Iػ37:U+rђ4q)I  H8q3I|B FջK^د61&iI$Dz\1 _:~*E8|-1LD~zҷr㭺eSkG9m`89p #!Eԗ]̆ ((d*^o Ԋƛq[9We-1TlN['YErX悦kN65S=+fF5PU{{`1^0U3tYtP.׾~o|dmKiV [쭉bVRlbgNN|o nxrKV[ 3fpubumQSKB9q]K'A}Exmcu7 Z2/D6b#`C$[&TRh`k"{w+knq3@[@sYf:T+Wup?:TCtJ; >`rG}FhtYc6wG9F J TJAH8anzY\ W皥@;0RtThA YL\!0BylIOz8* d-%+8rOd?-;eiLN01MOlH׭i;K4%ӅqgX˙c"K1%iaq2њE*/OعSW{\JeoT_jnk56^}^n;{z[M1^r"3MEk'Й3I|_txg{Ǥ:aߌƷ|M4?ӧ4vL2}B{ Onp"=ZNpOWl[Ζ[=3-d\9AwikZ:h ΓVA~?w7s"87VklMɶ1\1zW+_ѤFAj )a,fO9ylۂ)DvM g,XˤwvO|UYQYHōSFD.IRD7h7m5 QjfĢ#95YQ,(֯lS$klI޸Z udܭ#0& "<tT W\lqNWN&T2DC>r-hDBIjdSZ|"+Ɉa +J5Ɯ;t[Tbಊ>Y+=ZQdp& ?V3G\zK-;HYa3Z0Ab4ez'z=` %튏AL\ )B#Ou8wkzې]E^n FQrX7 l]IL$kQ b.|Ę8S1؀/w{*<dUQބ:Fۢ!$[skq(`F't{{Bh*,U9zt,:r5p\ZLqtUc2:b5S^pZ[^%VSK .wN.(oRT>r-bOy e4OgI=ssn뻩]2w9']j\Bڃ b Y9}(xC2׈C]W.UȽVA!,t7#_VyM<1>vL]svҰao?/;/b-DMBG\p$" n>  6;,6l)ѧ)"%l/z,JMʛ ]]u4_W׍SPVYoO?ܜ>Krѹ{NASSo;'k.zYΜ4q3?IaefHC!^r##O<~}4]A3cMc6vdC- ld{ȴŹ [×`6 ZI%iP%z$H-6YWGϧk囮JuM/!N7rQEEgU}`z=qD(pl 2ۦZL 4m6: jl0b-T fgl}aIl8'tצK1u  /v>M0%@/l0QY;D۩ȡ' LI{qu5>5[tknwj$S4$ l38Aa4;;y} S6v!ˤ$3cn>Hؘ|BfTJ;!0Hf:ilit$v ,D00iv=]L\z=>-9~10}\9s%MpU8`ͥl /ܱ߭=S l4jܹWמYe?&y 3F fg7x{6"kJL0s6jAU?=tѾZ`@(eT ,,c)cr"uhW˩u?I#N3%2eSOiMi8c:>ђ%, ٨IJTImF4VƢ%V{tXxuۭPr]t - )5̏Y.Ϯˤ2MFLjۓĦ12?滅v ؁v?\6hI" "P<[^3SrYYxi$C94[OnbFۄck IP6Hi`QtE-~d҄GocJ] t[lv:;ukxc59`BlbbiOڷל8d Ud1G*bڔnЛKXJ30rp 1q>&"w6nC7&jĶ -\)ŢļkPM86vи{0r[[NkV3WB`9G $j0!۲:}mRVs&Jj_ SR>)rR Ծ oF`KJ6w"6;2RM_ Rl&K$I`i 8n]KT |˝jRcן?}$VoGC% "MH=ݞP{,d1h(4Q6Cs)ܪZ+Y6E2HN1 {*Mvv@*߼`bi# ]%p'ZpSws 6 ̌|L̫YIwkY4 s.w1\7uQJ!NY`2dd陵ddf,>+)ez}DfNnҎ'QR%/1"8̐ɜ1&^Qu_%NHce1:AxV2 !N/S%мȓZvp}Kf:C"J9ti".6 
Z-jL:kJE-õz}hÎvFWb=tG!G_ !qc8|鴪=;~sӫ%t?A'~69| ߑB$1; n=]OfݻR_?.'y3-W3IA nX1E'sdddž"ӣ.9Is3 QbݑMN&Dn:I0먴Q4 N( 1$Jk pgHDT8Op$X] Ld d)cNnтNppUkBWEs7*"Y@aaLL!DVJɍ5\>|TI3dcP 0 Dܱ6WbV9ip`=t(#C.7UhMԆ2eI[4Iq Ţ&,7 j4"K44$:Y ` kJ*c$ hpB[,GwqI+ ~Lyb@_l F"Tݔ,I,K#Q>#3##'+4b.Co1I"rH fߜ'BXw&3|!)ےg=$D+!$c3¯+”KwVznVrNtͲ:N|-C1zIZ qc.MLD "bɜR" 4q׺&1bJr6 L& I W"d"OsPxx=]k>f۪d  28rɕ ? .6) agBDH JM93B^)\I JzD1T2 A9)gSDP[tAPWG7^H=y2[dS#쭂8SMh 2ap[ u'#ddj&b+:'L +hX}Züoȉ.zK5XԢud@2i,ԔVͪJP4%u)@ n M3!eRA[m%Icq rh g/l$ :H:h%{E6SٷvhϬA"DrHVp+ų # ,!YT-J ڳEZ޵t (MQ(le$z-;)$i6VFj:[%HafHP5(LO2hX#Z*kʥ!I 3dH;ᡠ,]XTLZPѐˈW'9nses2_mLø0DG&y6kIHU5VamEШ-)T]U9&l!j+ J5{QV|& toCx̐Ts R VQ–8)G = {qu(KUFGFBt}B22N4+&.qq'2`~^ִyykк6)yRDL8n( r t! bA 6]LP?krJȀR̆VɢbIuyQ_8z":4njH~*8D9E7vۺrXkײ:':(Lk"fo԰b Zk@#tT kEaTPFcׄ2Id"EO:"SKI%!$+\JVD *-ibnuژcY<#1 ?CEk" ?Ï1#U('Qd uNEhU%GU,by%t D"+:z6*#|anɈcXDD/jIY \ Ab,$wA*_tr (:4!#*hQJ U."o1c̫'8E'4(d*(6 դe@p ~jsFĞjO&* U_P ё jde]! RZA=f ̻ Y%SBTB;Z(L^&ak $$`icҘb.H(Pҋ,€ZnQ(G'K".E̡+BW ׬"鴱6@7 CJzhp({DE@9*OKV6b )TvX#cbVc) k:(Ԟ j.Qe:.q,EMRjtUF]EtM%&@hjv &WpmF shKh4* Fw ?:T Z4] kcj jw2싄h*61ႳLd:I~E´)٩ dRG2 J$Z̪Y\ 7Q[:P’mȼE#ҔRʆ|풺Xoݼ AI8 ツ@ִi-+B !A:Kc(z6vR,Pmt*H^Z;qCkH:c 6AF} ˨~ko}uI:Vq/n(Q)f<\79dAD]Ͷ=Ko RiP(#=Ė@z76[}Ÿ3G"#p7݂nCbUzMۛs*)?ikWoI_: ]b=]5W-DP*(zm#)6tO~/KA9Z<|tqm ڳ@4]L'hv19i*}'u[rO0 =4pX07aa9,msXa1sXa1sXa1sXa1sXa1sXa1sXa1sXa1sXa1sXa1sXa1sXa1'జ rpX0׈aYsXD@氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9Oa( p8,vÂ!|E`aEc9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘G `02ԃ`.CZ#>w 9? ?9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘b9,氘pX%6tqݒSG]F87Z쨇EY- ,b=]=҂}s<_\o[h8~tp2U\NaO?_BdNٷ {5da4BWZ'Pq߾^V/oyǟh"9GWtxMop|]Zx';NoFUwNۄw2"0 ;/KJʐSNo gTJ?G>XNξp_7 ޓcO׽]sR9FSfÉ së؞m`Lg'B/!(UjpF@" >eN񪎻>Dw@7y ~ v}[0Nv"NG:|!zi*,^lg$ػ g{~:Šk[oϩwܗC.0 t$_K%^_Oವ>~Q~r| iULnC{uܖ->xxS)bc\wq\"WswcS2dﻈH-钟]#nfCo?tÛU:۲{a1o> =\mjpPٹ.yX/m.q_Q?nd'\IVZ_!8/<ӟƅQ4tqBGmP*;y gپpO.őK #h17:ٺ-{B0aՇ0:ߵi?r{}"RQCoƆ0${-.DwԣmZLy_ྋ" f/= Dѓf;V3(}@xRq(inQ,6+ѓY.lه[]e}2#BՑ󷢜IFdp=RZW]FvR4Jpok{ 9('6犼A^ H7H-\fe+)^ ~6g$y;[OAQSٳ G `k1˳gȾ^dFUCF!H#Yξ_R[.~=96}wo[}3`mɣy$k m gKtIٜ/#Ffu.m\| /yC|"-1"UܭWt+ _ٹR[vp y:-YNNg_=[+% H!]\ǘo ]>D~1/_H͐sAxB3r=9R5JxV _QXҖC8هu}t5W{?)"DX|"! t(z&^|)a_f$p,`r1Z34v\y{I2VWؠ N8=Ʉ}ypJ2ɉʜZQ8ʏp,?S;vw^yty?Rij!3͜;jH]_Q"X頣^(v|7&UsQ [ts[87ɛ\R[uef#+h/⇲<'˲z۳BEGv ^ׁuYnMGmi<)#acճ֩ATGZ5E+b˷켗7cʶ? cզ=]컥|\\Hw~ R)# 7 pl+)F:}q|MΟv=/xc]1ٻ6r$W}|+hp{&e6_cOdّd;b[XmJdИLbu[]]z"UwO=ğ֯c~=[k~dMpvy3c}7ugoV9Wp_7rd$m([_\qtb]ywdFyV:\v*iM8C#}Z`tMê»Ǔpig5^,dPPw^4&r~`pWYZ5I%݁eyqVXÈ5hPo&\Uf^Xj qEX:BV'H.n7D ^ѰIswwbu\`Zk#wU?ldڤ^ cš͍iK50ns65_k 9d[Ցva@ Ռa#,<Cl{ƧE4h#"8g>M"n{~q[f(׽P =bԀVf@LK#y;s֭VpbN_o\>C|dd8Y|8OA5hHP7UDq=%әY)*ĘremO%+l4D"Dc]/IOl^5:Yֶ8LNϦWX&hcc|&cxƺRǔ1r"Z4'=W }sPџ?22)bv3 ndpVu{go#7gZG`?P7כDfߛưJ >6h-A^BBamiq{ɯ!8Nl(4[DH, %0,J,H__IMy3hV| ټ6_$ԍjL=(EڧY@M.jiqOzk\SkvUjDCd%[5D$$g%6Q>(]'w fSαe<M.FF\ƕ@m&!qi5K̹} MfQc'Y]bB.`f^4Rҧ3NDsSӨ 0.lP>Y`P$a<ErMZ?_:/WmFlV׸1V S`:g!2"+eS>6MmX#q* \Huj}xs&ۃoc(uñ' m">ښWocMjvucq4x9 vҰBȂhWM'Eɛ h8F[`,VbY”3g%EA N4){DjTk+7č\i ="35lunR.qIJiQa)I2-a#,Zc"զ&6Q/a+Zakk|ʚ7UVqܸ#?xPK~tï/Z쓜w^`+[Vw"FbL(rZmꔣ:T5'4i彯ǶnJC@ bQapPTr!0 ܤRI@R ?0 P6)4 = #kr31|z f~B,&W.F愕ZYSGlONV'G4~U#ᡩ /ue߯2UA %{DMCӼ3j$m/(W.!RR9}b :2͙EdB]\xat/3 ~cˤ3)"DH&CI|1*6{81uL!/L&HU^tmmZ mt:CbX$j,%C,1HѢkԨ짔o)zcfh|)H I8*t?;ì1f|L5ZTI[XŅzn cۉL11恸s =@{H;k&ג<.EGEѸ QsL8>W:Je,J p,(q(.P6S¨}LQ벦m$+1Q{YO;XG2NԿRí^ۏ՝;"/2DgV{y5EZ~ʛ/e 0|gշ| ?~z%]t7uG/ymiG'Mbvg[H. 
FS__unN>-Gw6nc1=_MgX[w+,AX╨3zih_9yiS*A+8kO1طTpN 89Ð|N@eQyJg dH?~w+?Ό+<{uJFd@*6(#mH\ In+?"@\Q32aB(ҼO>eH=Pa͹( ed؈13 JeC[/#Ei(6pu5RW2X'Ne_-}K۫Ye$=9gG|qޡ:1]O>J.!KÃ֞6n%TG'28Be9 /A&,WQYx'm\6y48sASz**+<)J DG +FpR{Υ|;$lѤ^ugT co]x%b̊Z#Ic W2RppB[[TTT(3Y"/mbbgwk͂Y@^b^qߞ| Mc֣BΨ40g+eiHڄ( Aں)[ګ13ٰ M#Ã26uh 9(=ː8-8Kqv&ullw(ɲH=[9l=I 9g [9f`a9 JD>Nkkt6Dn"PvwlQAeXY+X]!9\%Z0XsdM)C E:G8`pe" ɵ2 b),^W Eg^gI Y=<Қ[|rц-6]z7"ϞEɸ\i?udƀT\~2툑+LF hH`-84ڗvI7y#ci-ߚ)TuqE9tFrsZX+̸`snYΎ{2W13Z53rŞ"X35uoówlCUSjQuP!F!Ă#`|`_d ݩ[/Z t[>'*rxus|iX)#H/a$h\KR%HYl+K\ P\s-CL R)oi D!؁,5gJT9 %PG7$T`|sӹoK%eh!DePYj `E1,kw041J]sq ?<r3ei8#:n}:;3 X$ b盓=P_Ls'?rO\wo6ߜ4YxmzPyw TFtZeN^`/{ҡ{?>CQW$9һݩq&Jy_c2Ʉ?7Q:yRnVLQ\!v1gjW7,vUYy.?+N%ϸw'\|v}a"?4C󠸟.scN`sܭyv/4jlȟn>te{cXvcQCڮD]h˪dw[規MNĚ2k[ 6rE{\F VUPFlt剌A<tj#_F.O-xc1dx<f)5WSAÄ>{j;V6QhЫd5C3c#۶Bw@9=n B:or!{ ՀD:BeCgPm_U"ydOCg29=6s$BzV ܔ!f Y(:96:; Y)Ho. ߯o-< PVt:4g\07)jL̆2@5Wm_ F8@JYrGBh)Qhs2DC{۪ Z5|s=0193rxݺ3zGFywdc]7E螑7!g(~vN1"WJpݷ>8FyAՑË@4@Gsc3.X[☃+) ZE3rt s聜=[݉lzDıFyxݿzf2Rʡ 04ܪl+l4R.~#R~#~#b~טÖ%grsV"jSdMEL\,,R.YdO>|/M.D ) (P1lL֛M4"#^ӵv4d㯘jGe'e :υO!"chK QAQMa&}өШ]Ħg_jMjពǹN4(u d!Fc. $5 ywlz t/P=}2+7F͌d'Hϔn LOx@ kӶUl@_eb:z]>Jl)szkz/x#QɆTPg%Pv@D{j1%rʈ( AOs}pr`I%mTR@'Â/=k+)> ֜1&0j>Tc#{n$>{p)i˼bӼ96{"FsxrJg'N$3DuSO?Lkm$7_fnhp0l.܋b6@ bly%ٳA-eɖe9=_-$,7/x:<f\34q߽[(}}xxֈz}oZɯom훥޼׼Z7o7<\l88o5l~'1Ws LGU`튨ڂgOZ'Ǘ_^SZp xgtdZ Z}:dPm]'ǖOj58nt3蘮&&)Q| !|nƟ^[u9$midHUO);Qi~17~뿤{ou7ţ45o".G^8n൉.Y)B2,ޯxsk=n2J5+J*6HIf97t"ztU\ x{1u&1=sEB(WoSg oUJ]qps"tw*Y!Vrm/kC=B8G5EM%;J.k0 mED 4 G'Oc:\ܫu|IzsPQ^#耧%txq4/hUhd$X@u* , b/r6FcT ͜ yZIBwj`tRRDX"¥saA@&"Ynw9)z8 8i3Wr~~K ΋'ȿ&0N.&8SدFuKi` 3̞ Pk 񿂥d+vBOV8",4/qiF$#__2b8"@_SXoQzlzNAxTӦ0W̗D4",b(| RP} e ]g!= ,?z22bVF?Z8t6` f-2>r%)Nznd=!CYT6t@D^ %Yf-޾T<0syI9HnFHTڐ:K@)8Ц&H^]d5:Ğ<ଁ݋#:1175ubc-ސXD{KD[C~J$n6H_ "P2g@@,&eO#!97gy ݴBU<`5g(l\TQ85ʛbMOQs 2jSֶ%){q,xffQb r*8b&MT=mt9q=QS60~I iRuxBS d-mHF$y64Mr Ne$YF̓"\4RYӗ5 *j>0\b_bnt XsڲFeOku0/֕KJ{BRr]> [S2:R5'3SUggŋVU-khxO O. /?|meVd=}ߞ?>q>:6KqUosWZ6S1*;^Պu9lSL"}X>]H[TIQ;E⛊#w^|gm=wX-hS/*z68?ITyr=rشG8^λ'PWNd;K۫ԏث\v8)g6,'ٳZ]^ܓYv3ӧuSo7IzvG;R!H@]ˬ,zM_ĵ2 wF#/E7BxX|Y'$֭3^遛8 úH\1ke,pYA1ȘV4,:$˜a *U[}r~H5 צ|CٸE[5Vd֒m:%w_7ӣh7DHgGY+f!$.R>-ILJ:K,EfIyMp|vjN6V-J,;S2q0i4*TYe!G(J}M>Ξor<ͫbYvXgiu`xrrNv3oע3DoZ$W?RR F\O':kL--{nU{fHɛc+oNx9| \ys?pS%7XLHiypYe}FrPA8Y)Y xF9S0< (G鍰N9r'5OѪRCȩSU-j._Q+YD&; DP`;:(J{uw0dnv*+Y 2q*DWt"Ek-T1("t{0&D2s %'BpFcqQ,Up!+ؚd,%IUgC$%IH!P"6F9W4-.sE1l%>I+p{ԙI%w"¦ʟRAhDoK=슰R׊R,G! d/*H3L<(He I%J=V&]͹iTTb.J&JduH~ ֏x |KAv<a>&^>Ra%s=hְϧ֝t0" PqӈRO Tisnb5ЛnZ F/0$+ "mƃYrLZ"Xj-J%֨tjnma6U=uusp|)+?eã.E2d֜4,0 =WyLNN, ‚2* 1)'ss( FIThȝ"M /v6奮+_lXTR' t_Od/B )EhF#^R+pKnq=AAFOgk_.W?`.kMJ6N:`h5T0ntduaѺĢ5Af3_MZS+JOѲ]XU1Ug54(8Wu`бPΜ d5H(ۈhRhBTC\xxE%$$V4Un@'?f2Zd#+tj&ҼiӘXMkFڌbmΧ/3K:9h׷ yS)i zQj#在ȴ9bNtͼ{^ꂦUʏp7u?E|i^Hʔ0\jSmqs0kɰI> i>WswBi[)xk/&L9O'~ypM㠙_]NN;v++ M㯧pNRi{>if{B]MzR?7s2nvӢU{et~ca'݊1ޝK;ޖOpt)RZv6ȜO((3;S׭Dpi  3dR%{v7`]$Ϫ2  )Z 9c\1ss7;:{2FlvW-{C!,;ւ>b6.'R[d>l' 78OnU R{-1}"sMM,BP]O? V2ɟarq95Tjn,iY>s=aQowpt] K?G2Ο4~g?=h3# 7+.ۏ&xl7wmm~=˘w;/`1t0Xd-;ןbKeG[ymj_U}E֥ˣiu4}peJ6̯b6?sR.F7"Tu->3>xŜ3x-6gg'6 eD 6:w4D 2ւ;$DBՎogb$ K%TbWm5mEީ@cda Z/\ D(xw_{~i^XvA˕&uduKD‚ HQ*x] H5fc3،}7^odFV#V\ nxVqu—?DsNM՗L? 
rI`Zؐ@HGӊMcُ^'UNrGԦ;6zC_CVZcPPm*&%U.ɨ*PciMIiVl° !I(FWT)@49f U uRyY`Rz=Tyoo Ab*c}]eel8cBTkα0Z(rѻ RgFfX&O:rغcq)g<0{'t-a޾>sE}؎!=/"[B`T-WJE`9c-a[3Ɯ.tJ-kʳM-AhSN: eeEJѠdߢH3R~!dbOJSj=C2k.s_D{K`T {+#+}LTBh߃F^!rm,2š`#k0tP`zBGWYBf`-yErw3mh]JfB){}5R1"BElaAŏ y31:^*~`}y?Jה$ ɓT*i Ft)X5YSLdܳ%#sslXcX$ߵ>hS0O% Z$ b55+&ûm6Y׮ˎeώwn[ zbc [CP\ 0 9lŐ+Y٢!dqo,K塪lyV_вNsǷLnъXm1IP:ThWoɹl=azS{-aVk̢F#j嬕هx|C_\0YS:/\*ZҲVDhof_v僖h A=60 DϦs`m=ƽً%7;$s5O$ ))JHU^ц|r%% L0$z Gss,j %d舱 PW4Wl b j*,,D)XCXѥ| n{hz=( T ^g%UXSh3qigHYƬf$c>vX~m AV%/ P igZGvEnC,\Dc5eQ>joJ@Xr(ϱ(SyÓAIlO(AYR޽aCퟪKG#C5shlN٤ٱ0/m O%`@تPtP&B`I5!85d>_7^rpO[yNu7nZ|sXafE!s:'f@Lys{j'x]Y(UȩIRxo訜kAF=aNg=K`iѳU0kUAZ[=fԬȾVvjwOj28D1;t9z>±Z) r!D&G}8wqaC:Ff,5;[jHFzkiU ޡwTitg;({wæfdaZ9V[0~s3=_?cYBRFaN"F@Q\M䚆c;a cdHkmJt%yJ>L.T%KIf[8,'A)m Z?򎞼c؞G?ЏbF # bTPu fe,ݗWi97GCfo*UBWf$P|0~/ ~`S2$G`"F5j BBY P W4HG4&ܤX‚qM Y>RKQI'Gs0/=rDK/3o -Ԅ6U B@X#sUS]7?M{䎽:4B峮Ს˷Y))P {8DvVRX%h~HBfhMx3r֊NTnq/frUUZ\Pe$S*3"@>\u#*nVwov~Mr6) Jۦuz/77/;#3) TV"bZTWbFь|  l!D.U$&cXs}1%$Yc=i$(5]_YtW[jpMQvQ4z m n.I b W#'!H /^D! &6̂#O?? iHl]@iJ2Vr80ꖯ"@S]}:X,ߦP}߿AJ ѐ`(FXR:gԧ2㯘3\ssosv.;\]3'dϾ/&i?;eH:NdZMS[Y๿π}Xـ΀cD[(_D+wnWe@ߙn} -py$2bmD-gƙ8.$lvً?gG~%12suze>ț_D%1*:7(P[T߬>Sm_ɽ̩yFr@.#KsGoy,$OѨۂm2z|~-v ;<+#s߼oM/u7[+vn6& d^?syu1>2#ꢫJ9L`OZ_!Mnuvu|Ԯ4X1,|@/1% $M(UO*xl CkP9t3<4uT>szrLxg"[ɦ~u+l*j/H'zF<%i}Q=]"@h%T/2 랰~Lu.gY:ZiVzth=KWGy טsljh[ԡV7.Q5v{I?J]OZ.TVlP#gcW|퀻\3M#ۙOqIG?\/5f|o [d;ZCXu.Ev'(z'{Ƒe AKHfdwKƠcIJn%"%HfwU[}ԭTeKH/EẔ׺#R&\BW {ZiZzl?j:{_`9,B̅hUuhE.^M榬Z?r'i^nC ~\o-/й]ڡ+qΗEL>bh W,UA\uYN)R~{E=ýuۙ4{2Nب{b抃GʺSņHb.4mgHVIItwxvZ2k˯AdCpӲc{R~IalMn8v]f1n^pղ,2x| 'RB94L?7IE U]C:.ʴt4wA=nr,224,˧L!6$ 9n:u.yCz!Z>_:7\ۿݾT(F Cl%!#7!6O$ʼne a!e#}Q&T藠_o.a@98wuE% kxu֥6ӓH ޹"<n48f!q927׹$ >Zb}mj`l5I#aϵAGQŇbt [b(h#Y:dkK.⌆tr9}H!F Q^Fp -nX<V t hĜ!RDF)E(fm_A De(=|𑔨gXd3]^ 5I{T'c%wYoݘ¢S09I)+\,B[t=o (>JCKA$m{2@Bi2"`Z"@)xaF8$[, >i$Ó)I `p}X{X`QdM@ HVD0g峽ZD< (Ƀ)hvR,1@"N9gWhdvɢ|b1 D~>|<I0"yBV*g: ˨ ED([eV ڪ#zGA \\s nr$hLm ~zml3+ː(<gA-1yvzj09d#0(,6Ez QVaX4{(`wk|Tg:%DՈS`PJQ-/׽i5Ik2!)G &\h63)0a`, N32Y KmIr!xηFyH[ќ0t F7hm\ĦEZpq]NC Y!jX86Tw,"f-io(TOU]s܇('Y` ')%ȪQ˪wU&m5#NFSd\"<ؗ-'6`'E'""`\qk,,Z]Yn}53nԅk[r ".Ƿ %HvQe)gw)khm!84Oq<-yNSѮ8%yϔU3s5up}uڸzaRp(uD)`{#R:1~ ~C" .2ju˭<ƷoU,1˙ցpz #qI1B?@QjDRJ5`N gCC191Q@ q;v ~kS cu9/8Tɝ]^.r-/a?ie˳h'˳78dwTwAt!E $deU3@B_Wמּ=t_ д.45}d`Jڃu&!&)Hf"٫'+dM:1@Wf?U󪓯$/u/>waō.T?WZ֨W|V5R7wZ ,h6|1G QV3,.Z3iP Gaqbqym A 5&ʶUdyKd<~sЉvQW/g]}qf 1GaIIh1!)\MLc$^tzg\>nx[ 5?q-AVXk潋O/S Տcy0~/o7;˯Tkl S0+w`|hMƽ9jw0 ^x~pt4nUv~bm.nFqo9E~r> xa4XՇ0G`]GCoгWhUOaS7ǺZ~˿ '+D)#.Gp+Dku#k~OUya$I9/4UT߹*2Tb?OO_X }ew{~4uֿӀ2Vgq;x oP} }jfyy<֔A`ʽT+ڹKX: )B$ ɐJEMƞ|emO .>[_s|u{h9\sS-ӊ-NW*GsГ9R.aACܛcC0c `v\28̣]1h&Yn=G㥈9N޳xG]U]!LU]V4rz9l,v35a'訔Z <=N«V Vkw,ב(e򠙩G>qs^H>Mbj# i%L*LN2Lu+CDQiÎI失<:,TO):*ȁ`o ] [ed뉵q"Ǜ^P_* ,X0<ؾ|Q) FȹI2{R} z.>lg">Rx/jVeރ1K=> &x?MXQ}x7C{(&ѓ4s<0hyAB8Cgc)YR5 v! bv\ajeA= ը6o:fH hTVYcA[= +EP};ҐC9:q%Lpͨ?I=,n0H 0bdh۱=wOQ%۱l:,QX:W^U_gT1ElpZ{M9lK0emO>qP#ddMxHˁິ͕@%G$ewZbClJ,!#0>żF27ʙ[l [n;/+O] Ie]LUJ , )%h(y.VyN7_6jXt|.PmSFz \tc _GÞ'ʹm ?e=]()ͯb$xG!LɌE9\*EiN8lm6PnI}ڞ ^h)y m&D!: ZNUcǎ4ټ2ܜ[ IbJhwV"m & 6jR˱ldlzXbݛS%gSҚ. 2cgWv%@Zځi$dZnR)o!3"-!>Cݴ )kMW_dP_?`I%Kʡ 04 UMQ8[Q&v#Rv#v#bvטÖCIu=VNu!Qq "B Z\8_sL"cWIB,!e4P4-Ԝf`U<^տ^Lw`ݒM12-g Zl\LD$6'5UħG[jQF"#uYJ>VNOfgZ;@; Y(@]%YXQGv  7O6?2dgUiT*muif? 
~fjR^#Eӓ|n_y%^k V67=8-ՖX#F (*=5I5Hp@kɒ`wZ|[At5ԙZL(iBLmqkJpec hlӫ;ɴULFfښ=iI[T2&h1}҃*tvaI!j\}}vĔuu45h6XZRdu" K3.2VpyT@t[bbkdY?2$>]a46p:S2ൖ<ܰ4`sxa#{_~yЧ5Y ݂0=ɜO}aw|n?VF ;zzvҟ0K)!?uT,I>_lrp"ꏘAccR;7|1Sۨ;)һh|0Yh{Me'yzT/ 6"O&'Evƍg,lnC4L:{f0M~,6 w +9X5?Xk8=;W/؂o{ҡ{!*wxE#s4xk@nT8tx{֘,y>OOo(sWVE.l"ظ`Y6~ٚ/KgE2@ 3r| j^,>}ќu}Y{wJ.Х;qcNӷyåYsze/MwH(6pOeUtg|Q@'ڣ܃|20~`&sѩ"倽eU}5CĞo#,c7yc6ggGIC x:1Eb8*1*Y}V&}Ӊ˵'3>u˶Б] o5@nN/$>l4)&v!Aa #o"ȅdkd)։Yl YY,lMW:⫺2 dPxNUl 80TUMٝQW͈&-_X}Omr]1DOv"1z^D)jϖ !@[AK"ld8B !mBۿL<$F0%0"ϖxmJa*A?IZ(M8# e@NYeY cE<4bH&k#.RվBq%R)΄֢3Gߝ74v;CΦg q)^)]k`?X@{;@]'u49>Pxjmڶ$UQkS)@Ձ#$<5Tx}bu;1XJ*(XCn EN9z97U5Ɂ-Zf &el ;u/#-F)p9XsIa|Vt$휷VX.lslnk?M.}Aſ}?='&o?Nҫ~'{\I=̏O'?7 ~\{z֛/ޒ7o&o>\ߘє',PD]|g3A,O`2ّj<_/UɴʊLd[&YSmj/­|}:6x:=g2zq<$G¸}ٜ_ޭ~RНߋLȔg+P|F畫ڰF|Huwa#S%s&Vpʍ'5q_TCF,C-RcLi,_ >"Ed ~'}nȺMi`:+M۴Y{p~'mW{ _iq0'?W(yec8x̵;4EHaVFNc1M %+s?\{5Efleٓ%6n5 پ2ڤa$!] AVyr̓)1T_Eu*gL9kS DF%]Dd/ ך\Yej$e*Q{x,ig9,KFp|Ep|)VQ Wq>ͳ~GzPNۗt˝\,4xW?|"H'c.:%ܱ00͔=!Q H;?{uH=vD=5h5֘''m] }BPs,<#Xֈ7ִ`1x 6~-v- E0_Ngow"Dz5rb]QudtN )bk*s{s9%UXca6J !xJ%QCV92d4q\ ЕJ9:kȀT-j9RR?`=xly[2ˉo}Ee!WHrF$#.zQO"r%ΌrR&y`cK Sq`` ֝T6+BAUMi.fh )[Yd >TL#Atv& 1bpF+:8 C KVBuI`qJN()E5P#PSgf*M:;D XNjBͦLWBtUIt } B Lޏ`LIM CV4[8"0j0HB HS4ׯC,Xz/6:6o-fZ5x,,KY6Rpn)Et'iiFPf]K3q.~0p'/v4.wɇK.~;<6jrJac~f=>Ebخ? V4ć&Hlj<>~C]Okl.B\5_}?_&u6=hzr-VlME-Ƨ9:q³ 4Rd6?[uà>E7F=Z9͎MK=\udqSk:?OA}8Z7돓[yCx:/_~]fiѳ㋓,-~=&5(>?0hkSșl/7sb-ЛaFƲ!9f4NM5XrPLˮKPqDSm$jc5wA̚10[誑숺8Frql)ɹv꠆09U^T@={F6|6d)f38K6TAQL)vh- ԃ8RLMm$Gm`́u-h { B-As #AтF&U=eGAmGM8޶$ԢE-Kot3n6;Z FSR KsiMqz,FIx8E9FV@T=圁z8Bjurɒ+>d\5Tp>?#!jkdAEV0:#ILX&M nW5O}}I ifdӥ4Z- |,yqq+b;)L-@Ao?V޾<%%}C٬q~!T䲩s$8B' F7,Sz+|̏R74xDR$ }lr#>J2 m뽟ӌ%Lxə_>Ȑg{<_Eώ' E~L.iqCjIߝ'ߡiN:+|G3G_s"ʢq]9W}fү}a}R{q>Y -~'Z\ZK]EϼoN]Ңuwetacq1@Yblfigƶ筗?mϩZ!ݙS`l$̄9Jg7mS{UyP{;r14UTGW{X[ M#gBe&[%E}s 1kQQtظҷ_/i6-c07fƌ9ك )XCT H;&{@.qA)Bΐ]'Bmd{{.6)ROI#ooV 4Ύ)wޓR]8A2\W`]:gEicvc΃(-YՄJ4Vp0_}5 '*v*2h\1T"ϧ4DTx6֖˕m]SRnz[֋7ZU A%k4V!Ve[ :*S++΂@i,1bgM,+dd;bo׳4_>Nl62!k&km+Gc^dU`vyi4OWۉ-;l(ɒXHvzO#hۺ:WEWt&lX}LmSK]gʃӋVc<\]̿sk?{uYߗX^}:R~X*yŐIm}otPG{+)nyϺ?2ƪܚ<8p:;q!}]^MXez%r}X<G˷)e?@2E-1 "Hg1Uʤ>u7֩*SeRy#M;\>r>qԅH;k;צDltL.7:jjVEq|Mؚ✽/5oW0uc5GSW\1hMcYj&|fJc&l?:5?UkE t$B0HIBMѶb@$k382Ѩz;'yՊkkv5&o| 7O^nS p J)yҞϡ(egdE 5a~vjʸفҡAzHhmdyFsƃY`J0UPsF,rJ:Z+s!E>H* jF&ҒcHbSL298 Bm'Զ#E7"rSދe  7e4!\a`P8V%*cV:OA^kZFmJTtG4UYt:Iz3Y_%OĉFppL3m %Ŭ~ {A)#ŊQ̦TvkbeF 5}p,FBva!2ҎZ2V|vmG'vj<8m'DoCm,EX(Z`,r)Y#g!c/tC9AdZbZ}Db6FjkXƷ`h*g z2KvD,,vXhԙaAڠK7 v9OvCzc(_<>JǀFO<ŷ9e2B*JUJL89TCR6rc.R&6hfAy/9 ؠ Gæ%(9h-؊"ۖbMPA˒R,[O\ƢLBphl${gmq8Xļ'Ƃ8๾tCT̘>Jj*F;1/ν4=O, JoLB2PUlQ(R1zEҭ˓͗\nlZ4xӯ`ҷ|uw2]8<4T/ƦlN=Mgci/]ywF{֬ue'CÕM5yo踜ҧk1jz.wKx[?;wFg_<-Qkњo_A7:OdWc>X>vOֳʧf'-6aʦ0iQYـ~+0p0)7x;\yxOӤKil?okEKl%rũ%7vǒ=0a!SH]"TS/-Eqs=;^t"%cZBv,QŇgeX*[8; Iܸ1tZ}r%f,v.;˳YGՄƈr%e<$Mc -2F|eH='Մ,Q.yv0zrq-| 0F:pxoEsd|9d,sgp3L ??~gzrtu9T2KӾ)|I<\]gaP qgŽ-Ap|1)SL9zO!.S;wsU.7.6œ6S$ƅ.cF=gŀ 9tRD 'aBֶtiWjv24k)ڻ8o%~w,Q;Q>\H7aLWU i"oIFnVGѵ꯴ }s" Jaެ@ž^+7AU_d8=>P]"]pÂPMp:N2;Crݶr-|m]bFjc͖Bu&AZ-_?Kny\xb xu=]Ib kxLXp*IGqΎNftq['}Jz_έmd$i&.bi& &OʧSVsv,6YX1o׳ғ K~AYy Y{j5SMt@TjCˤY:֓Џ]xUzJfG"641&staSH|W=苐/˱2r/7Qe5uX%EeĨ@8ŻMջ?Wȡ{R] _'Οu1R\J)у2BddWDZMEاveNC# n-Oz3=9꓏յsC"_:۵/j ?^v>Qyѱȟ#Ӑũϕ IuN pދeWq[[y5z/gDlg)y~V>gWdֿyp(K6ɷY2zLfvanLa Z˿VL:33+#ѫi'B|z*_G_:1nC$J)US13xtBށS %U3j1J(AR$[|ڰ#+Tj}-_B%9.$w8I($a,k**]fvQl% q.˔tUS*X<9f}xxYPeO,(?gaގYpM[ FdGI*Nb yDa coAڳZN`Dm瞭`!Ab cܘ̞XV)ZW˜,./ܬk٧Wd}+s4)6 I HZ & oIVէvYD ZcD$t ht6zFt6.zyc% b-8Ȣyg\}e'\(A-Ԭ$gHr&K$.OTP'[СF*Fh Dm!eA+V1$/*( @J(}x/fmFIZu驱W Gjs&ui&<:PSJ>:quR۝*ϷoX wB17 t}eja,j62R j1`> *=ت9c f] ɽImV{s(*O>"6}/ӷuG4ZD%Yj mt2{V"u /Uh{ˢ1y`@ɶK }bɎdK9GV$0/vdX+^!֩|sKZpXw]/4D%n{RxXG~{,(6urX֧:MMg S1P 
(kB-(X*i\mlrTI8QxRSTqX7o׫f,3^#k)(N,2^ҾPƛ/?M/.cY'| @}P ܖߖf)41!`)6r Vaᄉ6r]NdD۶xƶ HTsM&mN_D}̪lR$6 5ZNAQ : m`_sm)h,a+7'-}h10F=].aG(mB;VMŚ:CQ=m}r8'(H\J" Mޛ#NB/>0Ž! }M$b? ny@-wkY9#(EU^l?C/gԕղ |`T.Y}vdD竉qڄܵ[Dkxtk`̠iuQNptY!($BP2VԤs}-{wO W,O5T$*Ǿ-EE)9Wdt8iu2VG}d;$Rf4w}{ުdm#MnAN֢bIfSbfX/E}n(jvǙ=pKny+%Yә҃8>_=Yէ6-.w*89bA΄;kۋjB7j0ntU8*߭R6._s1fko|:=Mg`ao'l05Ҿ^[o9duXzUC?UJsvZUTPhC|Z%$MXP>@u!X DI6l8ܤ6I82nٿ.04yOmt`$m_\ *~۫/~75_yU/??K7+mܠQ[YBi2xj0l4:^C2!!8ר)jtҩDDTT C%,,e =7œ1[t \-!@6&Rcr%@ZSqZ!VM%d <73 Y:t:8~8ͬ?K$4FR#%9')@I''%ZŮ΅HCi@5yTOxdύ<T[*;,l7B'?fj }Z1*hXғ)5iSֻVN<$Z9k6Ԟpw? ycg@ &uP>(/?wig7vB> B~`Lq T:1vx JuObM)L2d/[LuRܡNog5x:t8e jO,74Qq"sqkL'Fj1p#CZkS<Ȣ+ն BWtUDTjc;穴1 l玡Skm7cXr>E r ҙWX #ۜt0r0ѰHFm1`&XT%irV`9@x[L!c|q*J>gm#Աd lM#5:>4uF9 h4`R8linlH~iJ!;' 8~Z1z ߃ɩ֠KטM:HWֱD0cP]I'vRr>|(Xl2p&;Ǒi~k9@bY)2"D-MsHm&Q<`իd;;ArJ:kG.3vӎc*U:絋*R:!U4ըZH1XB+)%`vcSwB;6˶ÁQ;=:|d[ev9))i<tʚE uWmY\jKHFP閇vvzGl|:Aqa/U\S|U>~#wzWAhv/P5:fu9EGuԫ?#(4z^Tb%%+M16:xT %E5&01}n(cq+>Š"#z'wV$۔Hr*'y֚ )쯁]M~tQR\HL8Ŭ?8X$N!GFjT-[NonSC'Ͷ㤎fm½!z ŗ6D:(ѸD"VL(4:(eP~/zuLe߇[ t}..#T^GalU5Qy[$*UWMCU@!wy&kc&,߲|GAz SBkeNGEYa*|q^ }rZv7eQ-̓D?,VFD \E L03lOcV1fm>4ЩٽV; Id@K|̈́J %=?K7ǜ<w:?.9"{R9lEY~_ϛKyIs5+*'EL6B֊'j̦%ؚeQy2(Kdm2[ǿ!QW[Q#e!ng|~|~4<Ư)yuaQEPM/gGc@9 RJf&GAb2J#mMЇrE瞛oړ۰$.-LƌQ5x7! GjuQ~LaoBNՓ AgkHٞll0et (®GSpX\IW]C*T3.5*'.^ko(lІ(*b]woFphJh:"WE}2Gw-X.`˓% }ភ RVlA@L1%Ā%d#hÿ&")(-qe*7g]A%^SF٘tnGjAq F-~M)( )$KkAk0Һl׹󷀳o֘|kf}42[bo/)]^}~?;"d1=kez39N\^?\779=xzwu^m"y]93^BU NF?\~|pb};6'pN7dXr,.%b=(ئnnXr [k&35Wh|׼gX&/o,޶]kۚ,E& y98z㸾.z~T%L_)7۱>|(}탯3<]f4o0zچS]λ,-.ʓ VPw3e'ײ!G5?r-cS T]"n?.3;'roZ)ȕ|M~3̾s?{w]`G޽yţiMo'(嫹U_٣Lh|\.?oܓ|?6@ mr?+m.f(%-@ɖ_Wvw޿s˿6'CZԒMjJ㝔^-m٦~z5Zn+)ϫ]ѳ[>;ZtuYʞYvY.Ke),exRv鲔]RvY.KA Z9we);tY.Ke);tY.Ke),e槰RvP]RvYee?9n+ }ի`p!N$Vʶ {=iԐ1&ҴcT)d]TR  ζA.HkℽJT袶QwDG ;mr+kUzQ0(݅ڿBmLF ~.Z|t5# N6+((J)@k|QHT1D*x?Ѳwd5:J֒sMU0pTQ5Y&eb-6(Y%TgLhBi Z1JI*[%eVIm2w΍v;~Ƹ,z)SC_g[C-+m#IENZyEfcfy1ޗ4yJ%Ji_$ŢmRU)?t(R80+L)88![[}ٟo-<[0S@ax Nhu!)&SBW?itn`lV/O 4Bb8. +t29'F,A6HH]"֫0 d?a=*ʩu[י}2Y4iv$1G6!$CtINْcx?n橨 WI;lzXgTP*u 3XD'P(vm[(` pzGt  ΅RAQ.<hWKl<K~ѥg^{^a4v})n.}̧D%v{u_&}}}d4K.EGEp.CFiWmȿ^{+5?)Z$,!`_g>ؗ%\UxJ4 n@ x,?C[Ȳ}tnZnPbA8"P"TZ*Q{딖ģuYNgcGFʲ@YBď%79V|+y+:@;sS1 %A!Ay( }Qr $;r-r=HѫLhUb<))hT2pz 2 \1Fy]x(! ýscu;e[)#]=YYPZ5Hk\ވV9)m 5pukg>)'΍ af#1[(i:p>ό@\&֍ctu٢OyUe̠&Z}m/J12!>+NKwC (M^/VanuP`21Qc\ c;xq(#TI;;]a mz ۋQ}NVqd&Icn@ \bll cD6cJbT\ X-6\r$x,P7,Kxza󀭢aF14"!_4uGsZ<ǽ8l!ѫnȢ-W1f|NWTSN,;Vї\~u 7ߖW,o>,w'Œ>v]XNL7w'͒}?>K>%,?*JVr-e2F1[B( QdE%oҟXR$rhnCJ}lH^c" sJf}qlO8JGLa (`X2\ ^RKER}]iCH&:=2C|pGu倏Q*|ЁVPLOهp] F(ghe[|ʚ[5i#9ddz<~=*I ΕŐ|*PǢCV&l"KYVG(]rJBE4b֤dz%|cX-:,­3`Fz Oeb^Fڲ Os?'GWn&$(Y'Nge:UZr6pE&5B^XFWrJS*4եb`tJՆ\W*=oW]kk@GTd8`dFC@>HDF/mW3ݮj EljKMEF'Q;B:h&1اW o?D~׿6;wHDT*M&9~gKйTɜwkYh>\vчKs~nb-vbO(ݓٗt'/tҺ4l~OENߞT/JۓQAf ,v$Nywo-3 4QisFZ0~ExW&[/=F j0Qྴ֍jڢrRҜȇfH !%C謃"8 KKp7a`vn7+֢o;JẸLk<e B@u(fk6,̶7XɰY}R}ZC1:DǮf_%zpX*H=^7l$ FkDNܣWW_&ďL N'K̈I^3k4`vDnh7`jðnR!LC$]tvYw6%@"Ԗ6eG?m6SE7rD-b~\K[[j{2?l=}CnR#c0]ḿmArQExl#p?уo@]1Ulq[]jh#Z+V\[,qhIκĹh:1Zr $]!دESMd 3.K2@;0e#Uq`%~YSM3Ļ& 8&03p @1HHq3`XR:W"!vmu嬚֊YHD;Lb㨛,-ѧ=Y2yw'mX֥Z :s&)i1f|iYJRU&h2QlHwYO7MI [0CCa.fٚbMnVeV<ɫA"f:k[y[>S+R(Չ xǤnL5pY˫jR¾jʊl׊?l҆LbgfFdK5yCY_u@-qFݾT|N]Z*l@XD:ޏ6`[([rurՊR4xooeG%a5-&K1uǡ];wL[AѥG`!(rfbds_UelI`.Λ cףDgBHŴSHc99&.0ĦSgNs1.s۴SL(rAgy@Yٵ2Q^Zs ;[3Qd>Ks2ebLY~/؛KAgZp`!G^7b VI X$e%̓32砸h9flš+Ip>Twy =Bbrbt-%s/\D^O7^ݗI JG'l#V ֌5qXØX饍3`\afٖ)Mf-(AgJ9҃7}6޶4&j`𓇚e G}6@ :>oMh[X7 O(,0( #F1t{jg#k=}qcLMXzn)’QQfbLMf 5֪Nj;m!?}jsWP*ty  HDKYzS I^Ř#+c _PxWKQI,* :$WZs u Aӎ[):o-=ЁkէIMfx=DBӪU{Mܷvn($6K,(CtyR7q_==lb*vy4,!q8å|V'}:g /N &KL+ki`ԩ8 }T7G? 
Dtb}Mmq}C(Mǎ:Sd Dʒd Q2Bl(5TsF416>/LdD-X 3댓g*N#Az{4(Z=A]76X+ B46U捙18oէ^z ߹H2HFgIEBA} hg+"T.q?w~uoM͖Ӹ򱥖?AOŘj-k0j|SH/8ł2`>eBv+#I H$Ȥ!X`_`vA-d_&9EA Y]#2{ECZ jd(!(#F@q$R/J6i/QͭDMoO69'vWYn4qd|kEh#YUDQ"D kyA1`m6-jb)ИR &*^P(ûJ|~.gN]KXODf,yPxՊ:c-0po͝|>Kfg&$t}H LlC-ڽ᰷P*pQHV0"{'d KQxsQ\^(a~ԍ?]푁+@!-6&uY=i ^ =L.AC^\EE9愌^`L1fݻ3|+j%Q|;1D f̓6;=T, i[!#k ZTK s*˽tMڴ(Z tp)L$YC O6g"I@/ѿc/rM%x5hpjbLz P:fȐ|m5J@3$J(7OoFj{}N4;v3w7z;޸Vy p@ g} =`*EI-[D2& N ArqIտM>Sb'- <9c4mB:̠_zP ١PD>s& C,MfU \bPSjc@r5N TІ1[g l]ID]nLiN]]X iQr%HkP*YP h 5bRQkd^*%&b_RTɜje*Hd(GZ+ywF]B4zp=akzmMXFAXuzFp"˵0`9Xq39- ]k65B)Zc0Z9*ў5:HѪm2g@O8Ⱦǎ|v_:u%['*Bhmu!U,2gKn$n$n$nTJ5Đ?VxEjQ)X(uIUOCҞlڢ }o2Ʌ<\-M`ߏg,gSb~M養f_Xh z ⱓq{迮?<ӿwjoSZJսQ<+OtU8=U`80;R^)_= ?'Zr>qoU׷uOyH\,U||Rf=k$1X@׮UzeI1j758=~8e:[mZ8I39sؚGQn5LtuO`Q 隹Gf~=Bqoڞ#8P}3*IZ3Vwp.> AN4\L/XIW]5`5:cr"1*]vԆႚ5[.yoW[cE@)~‹:)ܖx<#w׿-N ot;Ntu=ՉKFYڦ(ŋ1Ր棥3r“sT` EP1qKogdfdtz8^4jY:$k/ljtEv7Em- .#ZGTNGWeZy{v6%KbUkPijښ&q/Z`Rj5٘Ѡy e/}dh)2A `k0&cg,ҫ{) f5,, / C|oR(B1\K.>?7r_4 os#oii] .A8?z)P[CB\W jD MxMR2>wK8j /*j.h#OGEo |W'1\~gubCQ6|V) u` }8^L.f_.a"/s-ؤ7d=h|hEf d+ d0ǞcΞ s?/9pBKC̀,Yӌ+Ga.g:`6m^.7]?훵gћi#yf5Fy؞OrYӑ*nyj=Ǚ0͚O>W\S^NM?[n#z*=p̢.Ynn=W g_h:ֳvr?$Fs3 bt4#i4D_ƯԽhcKal~z0y&1v>3[Ol^+ '؞RoKNH䔫ϮWl,J[&t #U))]*a2|6yf`"=Hwsr\)_c)m,~#Vzs"y`H12,^2`^M'%Zzm$ŋֳ5|PȀ%FҊPZϧ5#,}-2QIG? Cr^_vy3+U}Ag@DŽ dh{&E-P`_kGɪ"B#%|`RqC&UJo0 (5B!DFeT/gbɃ˿v6n~}dR"Dr0Hb!cȱOkc4hda'}^ƚ^L2Pru91"G)K >:2$SNb捤2,a(Öj242 ' RAν gG$,'o}F'S芪5CK*$tUdyn6]RŁ>my f(޵Fr#_ѣg%"H < ,} AuuےԾ,'UW%uJ%i-TLy< FպTLC1e\![=Ұ+G&ۤBL V!$ꔋKˠ/K-s,猙41&ׁu !CM=6G+B 6 `r8fVhρm{xٸpf<7ZZ iS~}AQtIIzlnĦӣ64rRz** {rqGůu;s򗝶8:t㣃K{wte/=ˋ+cmdO >8R_Nv[cZiQHWFJVZ]ՎW\tduSK>=*!}<;zFw%Bw#ʜug!^ڨwtt,:zvPvϻ/ѭ缝zL+k CިcW~nw{}˦s46fDbUOJAŀ"~ bZ룲Sgb6 ?ya~+N'N匄"dRKiC\DhA*\-⌰I6ELGsQD{U1*2B61Gl';0DWR/(SKuTRV*@Cld9$\+VX"%r\6{*qi]1HJ^| IcP7Bߜsg[y7ff[bj>)v>)b^TΑ!Q*i[KdC ,3:ٸbK"ErnzuvWCH!PC)MI 4Ct&AS= 5ߊ> {k -\cm(1Z δkȾb%<3iV}tx/mf>a>==LUt!<9zH8|>p0Ų ԺŊVgxmlz3ͨ^s0MB4G 02Bu9e&#\eALjn푙5caWeKœqrt}<"fJi`h 7#+oKJ]s³\N c:pۿvK~?eiJƆ2gNjX7gJEM1%ra|} :׈7 ~9wrx~JJIYn*Kp;9|Q3?>-+'aKk1 9c?uO:[`_cmW:Ss:+%GtNZ',೛1UkvIӿƟi$'|Tỹȇr rƏw*e/>կw㜎+/R;dg|'سӝ՝{WKQ)7s62ZWΟӢuwe7ߏd qR{{2S}յ3ۋ&):&&%0(]pv=mg{|U}P24Uwlv-`0憍9G&1t}L)#TJ2uڛ<IəMr1e$E|,TosUjZ`|& (ߎTF*|`5ղm#X r5brPidFdzqRʅ=4JwM'+Dӂ1EOG#hַkhӴRŧ`[tET"Ui!{ ؄L^?&,xj$M֝{"iDΟ/ H@Jh٤{"9dcH>2T"Μd塘?:[ |tKY!䱫#N:u5~^^ )竃\hWo1=YXU(FbB`p7!~0d( ZOOX*o$Lƍz`k怇gQ+Xl!ȅl+`Tei%PV+u9PFlo/f3E=i̛ě='(RyWؽ󕐫Kг"LQcTz!Y+e2H&Qco$O5"ϼeȗP˘}F#L` y6bo ]{JYsW0c"7F)s1M7Cή=|e##0!|Cv8 Xah?I85 6Ĕ8^os Y bP2眒$j.C c#ȡaG0DTr8U-w~´w3Uqz@7'Uv8ؘk*bH&]"8g@r@^2 \ lR`[ח{D{߶9Bth_.˃`Z >FB>X`uh34phu^N14do& =:ZqJ)6>[%HIPޣoR+;L=D='A̧WyZz&p98`f y7 93՘g!YHx~B7UlU&ŧ۽#%;W8/%Iܐ]IClUk@.3xzy;4t ɛ{+F>\P*V'PdL s0rWn: W%|q{4޸ܖͻHvט( $ыCac2fF@7J3@{kruҼEyK| ]"B/No NjÞ2Q"[0@K'A3 sGb友"/z>}7Lm9qL7zZK+#J>[ݶhHVl+YIUs(gV&gʨpvəl )Ħ=dK0*a+6c{g)˰_]{gw|羵Jey e{^^3ss .}!%1~> _$҆%fP6@贅rLJl& ,SgcݳF658lPfg?DmN׆Т 8>L[3lHb)䐢(>Xo)WtoOZy0ɺsϘ;MދH$`K끺[lj3d=km*[k¡9Fͩ@lj^0 Ăe}JPH82j] ӞuR3%ʧ.UMG} ,Tlc5osb Amo)У歹VǞ]Y<@1wW8PtgXOYONt-I5S| zL֝db2d 5i-`ZWSઆ`{bfҕzn7C g|ɱB1(N0O$IOVaX`gƠiS-Q2IIDOɺ,PnXţ""22 o59{L}iuLNS*FoRuЍס("YLvRjBQ(\XЀGQIN]䖻$Y3'8WE qH)|7ϥĉNY7iݸ9@2E?SћHCn'5ohC*i< Tߊdkx &B %:؀FgB1ACv!?f%/ ۳fاOrHOJ Q{ڎBamxz18Si,ݗ>{Y{ef5DFXL2 BFt^| "UlƐ uP JͩY#1$4ʣ 9Z\-U(ZYE0Ea(|wVAV+ WXLӌ6>p,dժ@o4 3n>saa:z.Zπ.1F,;<}Ԛy{JVLl $fpoyRVF_tcKfyk[`F]$sI32OFZm@_'5cWR v}:]U쫶tһT| "aKg%=hig;3;~d:TsGj]ܷKkFpjOZW閤]x '=P*`rbfy ' k^W~ZS5xy}`mGs. J;ppVK}rL U%+aC?ԙϟf7ә:'$骻9xNZ_-PJ"^+?4=7R??vs.DI}T%åAgZp`!HΡ|!o<"Z;KvL!@U^ PeYZ0^r]5&KicLp* kg/"ZS (jNg0i{`-_g1+tnbB2$Iy~O.*(i)!Ąqi@o!d%DvAr %J`J^9&E R|m&b]̀22`NwC GI)O0 J zRwO/}G?diߏ:su9ᘛu}&yDmw{H?2u:. 
ۇgP9]|hW/$NqGhh,]|~.x9pn]Kew'G݂}m?%,ȃyns% pŻl}$0:1Yh7 IJW̷UuA$hm48-,Q3`6)SA8~+n?aSH/+ q'jLFsTt"h jڡq{u >k@0{B0ځ=x `,}1>("1M/%+&YDPxyA:\+IXdg9R+ Z'ƉfRR=QXmX~},i({(`] [̠)YٙB̃@_G`X jɩF1cwjGG!HR0k] DJ9lRMa)CWu3ݚOwpJ8E?͐3#_\l:4}S7gn#(51\;CŸWӳr‘O8+BBHD~Sm2;7#TB ӓ&ikS)1#Z[®69FV8Qp19Wi?sqvٌ06D\ܛ-y0 R$VqrmB=(h-ŶW26hAf{ɖ)+&{Nq/7L]}?"œmւť+xX%!|PO7n":e<&qʒK_R& 1wG@ђh4&!ܝV H]c4mpQT7XRsåϟ9ۺN9wשӍD7XILOn4נ5M/!W[{WFƽ{څ]hL`O/B/ϯƧo0>|M4ȝomī+Y#4:&6HG.)JW|X撶 Ml29m[^Ѽa}Y+5˳6jkGZ(R2~m̂6: } x{r/83vmth8e a + 7)%7땇¾ j[4a-94C,gKUe6xq4*ǸvKkY)xmxM[9Cy̋F~# G*B1NXl7֕k\{+*'O20"3녳tnbT72wK>Czo7v%+v R)1 .T5t.:pMt|`gol]$e2K-{̫YIG\(Zmnov62+I(:Ǻl4WMnUC j(v\q[f>Hh$ulbUm6j3!WEFiYH5Ȥ63I͕56_oxlu(b'xƬp#hݺc2$VW=RVAe;GZ:'.u6A! J̩":4E`KNcPLc %83ymӔ !D?iJl8b3x%F_FgXVrEib3n@6__r)NXpNP`>eBF՚&` m[Vm'gaH ݤT:Z&L.sZC+~Oa)ucːM<:)}|zaגɆ'!cW #@ j쀌)y: rHY=\-޵ƕ׿Buu[F6 [],!\Q!e=g3^ن1 ,E;S9 ɢNğ٧ЯX|2q!art4yJLB,qgf:!,Uwcͽ+ ہa*]JHI)fD/v=eTuI4YrOZGb5ӤD&b2oo{=ڢЭ[ &gl:3.4ǴHfOu~|؛;DC l,Ni%Ǒ93JI=aD6TkwX&VNUjQEu4hW|h OE_2^KLC_>zyv/C;XR4 *9YvS ҡvyQOst)>a$MwOObSbȻ-&IܿUAQh:2X*PK"e>gH鸓ϝ;X8тmuJl亄I)8.IZI%=D,df8jފL$ &SZ\gE^ve^~dzM1dvf;Ih>|;ؾ{$820X:bJ1sOy7A&iKN9jQXW}kEI}meO\k9Z-g"fzЩf]O_!1m}#-1p[֛۝SZkǶ=8 So>xw[ލAF^~wg-zN|gWy-mgwqfwujo2]\!t0PMonon} Gi=LXqJdѾW|oT.ol3'ag͒8H{#PDww_xw}(Xf=I.Q]Ǭ'ں`KĝCl}zuA|ū]?5z>iȶ ߤLxCa r(,ʡ'AHCi͆\o?nd4d䌆֫6\?7nO𤡄7:}/tuaW7۫_] oؾǯY+[{GYy4:vǒ}t$2hbD_o/Ϸ68>&^A¥FE53c0 w7Ͼ-ny,/w-Ii__zu P^+dS5;|O $ʦ^mi³br ~}?!mzmj{6W9~lqP+}UǠ3w 2K6޼Ë\^^mʥB߽\1S#~ؿؘfֆ |E(;lv;k>/5g />OݷKŻ<:ig]o]F8cL7d@m! +ok3e&1h/߹i9μ3N71r#c^>|squia(Ͽ烈74 A捰/T |:߻QG\$$1¾VVk㛷\YvgZv}X{7aOC5ӝcYK%- ]k7A+*4Ʀ+~ #w=]GkksT9׽E ٺЈQɮ3a "jcr PU͉4x|HTm8-\PS )>Knޘ^yhbXxbjC LAn&ğ79i3dC7WF\h2!Ǒg)s&.l5MV(ݓL.sW5A Ar7T)B#5mc7Z qc`^jEh1S@/݆\L8=` @V-l}ĵh>zQr"<5c{hO`I!zc*bF hf&T*1mU(^n= 6r \8\{< ͚![phL_M Ux8ߚTrpANbV]Q i ,YQxBL;Ġ^)|!$ dKe6gl 1U*>}cm?TP sZRP(B\|H*j &io r76\W3Wf:%(N.lcv<#Hr76Z ]>¡@5g/^5\WF8!A}&|&gM0,WizGZIQ)J@+˾ 6]Md)`}_?A`'PG=ǹF Dwb* YZ0f?Nbr}hR4ȴ@'TH[[eW)$ !*@[BIg*<Ֆ2͕ʭȇAQDѐj;Ml . иV&;/q%lSc wFNAr0 %Dܟ_Acn>:HDbs51-_0f3ػpUC[N$޸drpHq/j5yt0g'ӃoEŨNϛEܱOY_&s@`xmNx,>קO6GomuԵ"‡0-&O'Vilwm}6$\) rN}o@HJ'_$}Ssb j?0󦸘>>=GJf>& #8oIkզRMkޮE\hCO8e4q<=z2x ^]88"DW^d]?sOBĈ\eH6e@gYS<M}~{Άv4.Rj%I نzIЦVH XJFlۜh}Xi}*_)n`C "#↓Qt7bz]NQ6g"I,G!M^ gVw9;N}n^PyHoB6E&'%megOKmW}*Tlrp]CPMbX_w1lïR{cAEi -;L1lajbMiK{&]ҙ—2,rI7RWSS^ڤA)HGQ78"KL'*ywc7.:NYX&l|}{6T)le?5Is]bhWR<$XڝpKJn- g1 z頕OF)_^i ҒVƤhBs P jDVMD&Nm9lr%JTjVSm䚒BťSQ5>bZףRFp`YBUPͮPKMLFDA, ,2:T-R i}|A )BgUR8AL{Z`RJ&F=[M%\麭]yN7isy)˪ &GtXijC*//XYwXZgGVad:seP 1,lBAkCد+;(Ɇ hhb\$måu4IѠ6C9MGTT)j;Z՚׷|O*eZ,etti9O5/Tˠ/M &XO?Ch?'N.F)0e^ʵLaUg!)O*"zj3^mIj0fgI]}n߻QY7_^mW_BvsemZ0 z_c'b򖳊cl[hc: 5k)1BT,Po!uw:ZQ epTs.yx0p'gMIrgdO7 $ ɥ(cm!IfJ@`jȺHfU })N3!T֓j=nJX0@q zvTzTwS|܎[q>( cIv3Rg)MJmWG'w!*mk]^Tmu/yj,$yB?dsQ7H %~Ck4q9a/Hg,=Rdq }ks6KǴs7g_ș^ן. Ue5vos/~Ê-+$cH,TPli%gigt*F~f|7퐄2K nvzu;Cgw`rN"M)6dhO.m:G2 CēC yD)i%JQ( CEI2g::!Et,SF<`UN2Dji(xCEՉ%kW؀oAN/ʒUu,|/I{`! #5bP%^DU5ik"Gٸj%0x$vl8%R4j,-# dtMT_!F)ȀDp$( E%bLZ" ! 
B(A Q?X0Q"N,O\ɂQTPk@+`<Ҡ'{9j;`EtF•GQ[ v'00 jd%\T}BRQMgԔȃ2#M U!;Z4B@̊X-Q#XB;X-2A 8 !b²GO暊!y0j@Y\AT;o۵sP4@dfɇQRU{A6YB"hnIs yu{cErAW6[UYEE4EyPD&p(sl='TpEk[)( )b+0*lt]O M0PF&)j-k\o uUn`ѠW潃U{b>a&a~Y0ٽ{ئYY *,UH֒@dTԌM; f9 C6c@!XC6 A9J0I/ATF,-f-]ҥb-62h p'Դ?ιSVY8Ovۻ6O{n 5) <@Dvd"U0O΃ '`qjwpF9SK 0*II K)}a[G8.Uu#e؟$ :`n b "@jJ +Qd%Au3 5i W!`M6Pj'2TR< z,t,aIlL,*&!S$#*lIV KD_}g MgzG:f6Fr%?Czy?"{{+yPl H#8z0xکJI(iqRSeN,5mK:z2[cp^Cŋ_=h:K#BH0g-aOa]R!r!!'ycA)5~D\QD*5"bc4?I)b~ bXlpaĈgb rOy۸ \'v+*X\MƄjxn>>4$8B wI2QV*A$ ڳ db(Z%Ads`faAI&*hq< K-TCC'2LdA 6G.f{NjpMW0and9Ҥ)e(^ loa$,v~DG H1_be?V-ZܪU_^#F>>4Df)̑gu| I맿+q<]v+@qo6{ imog_f%>i\^/NOto8giu>ϭ tv|+}r8E0Z$ulUm|F,Gar9:]V=_jYm*VWαg\}MfLOpB ט)OΒS< &]auVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVauVau֏a 4OjHZ4>,uŇѶQ}XmNv ޺>>>>>>>>>>>>>>>>>>>>>>>>a=56( UcG'M[kpPv k;o`ީxp=f~ (w38%>hq0or=˧sl,*L$kZC̾KѦwUZ_Tv2֚FzjoGs=ŷ-ne߿Fu], ks62<4Yeq??;`L3kճus#e_v@[|& aS2twv_0Y?Ҹh֊"?yxzNZd=l,= s0UjiCc!Y9Od[CL=y,W<8[ ׎sek=9Dr7Dϡ[|7ˋI[9W6"LsݯXf 17 QMh6--(fM+#Ô\rҠ{R Y_eku^ҐIYS3=N塇]#: {̑nFkxu{- ;n1gz;=_0O,&yי j(iH4væ+\M' Y"=w~ڵX4v6_Z&(CȚ\Ԓ>$TG:t6C5{wrXz(-z+sl_.~8Mׄ\A $:(S)mI;2ZٽW@W(\| 3Fۜ)[s&ďaoO3?jS4Ccq؄ͫ-0PzEQ.UjV]BXR콬Ahxְݵ/0cLNY6)%?:S&-`d6G* E}tX/=o046{Lf`,{>p>- ibv3hYG}teOcD !(Ǯˏ͵pu=) :lb|w>l%dy #Y X>׹Cs}ܛ%Md Zw5hЯ10;-weXcv,m?V?ͯt)z] O6AfޤalI:|w EeYaMgr[>.;Xv0'knl9_lmVgV}&q;RVG꜕Am{Qa8m~Ja_x!I뻔>Wl#+asGW|}T$fPn~~9S-9ԄȦS ?^iɘjŎsD@wÈΟwQtFXZaawO,c lHbt:6y{ɏgu1埶n7ƷW4H?zz٨/0&F|~n;G?z4.fe'>z2YM 5-jת~=v0ǻaŗ"enaG?z͋~.a(tW-% d|Vi}^k6f󗧳?[c [O5[y?jxȾ1ӧmH.WrUeZ(ǨhD Vɐnkwx[fc$(̄".2_ g*\Z3 F׹|uXMx)xczcNP߳a͆KP9G$@"J&־߱{t7{]9 ti+hV}Ӛe^˝K}'_԰PUuN,+T)О46TˎiĐmf. fdUfNU~^z(Z+f7-]koG+f1[vn=`,fD%sD&c/VbmQbHY2 YU֭s뜈 ]@MVƖz M=(Uм*ߢ$r*kg^|ڒD&xjM Lw.ޥנ _Ɠd2f4R>k(2VƒfLFadFFk$Lʃ@Ux/ Is'Wq 2Ю$;Tx8!+Nh)Ih[R%35Eó}vpR#qVZRQ)3>T?hpvoע ,ŻّMLs6cX\JMh$FC`Wa|mp}8EPeC v"Lk{ <.GJўC^ZFDuh*d0)KCz|cpfXH4=I w92EJV#ܧ{ȳ7^Oi0yy閳Y,. -|OId崤kƷ: ^|p.y?JUChȀBI ԡufx[i:&kM !G"e G2 .xͲ ]%dCHΰRrH"ӅG g/LR%$Vǣh xFxGU6- ꪍnkTxNJ9! FQ\$唁83h}U-b0ƣA!IVF@kBN*\k'Fut:쬹 ogq"!;o!-!E;`8cy!uWSI&ΣcCU R58kdl%".tLRHб75XK}rߌ!= &s@ȤD$nPX>e'B\m(ycPgZT*Q%E :\2[=Nc%#<IĒ0"22B;#GS5yy:[xޢ*&ԱZ$b$Cw$U' bQC l6HKʡ1$Ae"GF oUxB?CH NΙX"ZfQUv.p=ao}J4S`Gũ3hrU u;i"AfP;{4iTyD-RQPxV V-Nm*ZhcӇZGfcAsKa8s`tt2*8=̃g\v YKP\g-Q9ZڪhVr\z^HEqzRPUڮ<>Hj> s0:9h$V,_R6W,d%!3$1Hds5j|C^% 2JfWX͈I_Jk%%uU*Yvx,c9zW,J0W0p QhTwkGޕ'@PQW9$LM`\P句-/һZQ˝͆ h@k7YJReUhZ&Z#G.|!{`:i˄%i9S_63u1)(*ͯI2Ξ= Rd%\=#@ >+~5=zsD'DcZNZVpbƔ*?9n"R5@ӧ׽c8 *~C1ĩq6_pVTY_]?#6ON,_I2u1Y:Z(WiZM |=~:,b:x D'ҧy$bӓ ]t;lp斤( 9/uϒ;_>N5s55r ilh_j0t.3.aD8Kh9S0śOgJM;ԋ}uBXt8o8-ckshm?{lϱc(kYփ|hf% djPR3o-NiY1C?ȎʶPw%qݑ1ܑ[Zr"Lۺ}"I[;G[]֪fMx j͘e,Qe\]v\\cLPw7<ݺ$Y09;j$UƧc98=3SUr 55!dScCوϧϷ:nQHU2&ߍ2w씶sչ)kT-R)a%҇"Ϸ~桫>TQ@ g &'B_v:lz^([k)W'ղGuXwo|1o*8QV=j@+x۲$tJ@w$cGu1 k:쮊Z8-5$kmaX+ja jI pGMTZjg(IJl#ђ?HFΗ-:ΌB+ G.+d`[V'}V7dA{ ]K l4PsI$P5KqfLٕkL^lQ!M pE'r}z5w\'DLdL?r]b9`6'\{غX][=erbq51ҡ֒5r{5bGD|XkFnÄ́@u*o^V,2XUknǖ.ri (Jd g9Bs"qN2$/}QhGUT{1iFaC `hh UdBʣuݻvPkTӫ\>NtNua"T&E}d~-V[Va=,Ѱ9ؕc (P"D2F|ȉ.x MkW{B{0wP]R#@ƁSudFdc S˝͆ dK&KIBSeU[F6|}J"W|~$LDɃ@UJX/ c|=;M"LHfJ"*)(mA_k Edv8  .-Tm[ʇ}[2>MHȶ"%sW¯,ZL"GF@Jj-{W&9eh%Des@L!T 54+XXu.UTS'J ų6+\ +ZLA vq#gULѵv&.gY_]!.gdCI2r%{v~u&ՋNO_m(R?[6OEL'tx@@UK|586_S!p/[ `T~5釋boO]20𝥝3INSb@}2ynUϵپsn| ;ڃ&J]ci(\ݶەl1X.9~kB"\w%e 98єe4@-%K*,x&&鸱l}wUObԦox)g(tqm>-q窻<:?Y:C]|E4rIU&Oi6N9! 
N+Leȵ _|n-דizo姃ޝ6X_Ջx?w>rЩ+^]w8O!\]^Ys;?/}I=Ϯ(A1ՇgoyQu'W *my9؉R< п`tYZ/{?L6EN&'K$uf=ۘ&jajY&X~r!xW-)'RPc0E}jOF6 C.8`AV~҉{yfH2@fe<*)hmF"H44*RIEv8f"3BQ]WJ\Ej\S'ߌY@?+tJ!1Ys)8BF?*?ޟa"}؁O>+x g\H.& vi} {jilj|~~}w,Hn7BeRJp{\uqI’k~UFr ?UT:NunY1R.(X|9w`3s}?BU}+Z}PA#ud}ՊWf9M+o BˊIe&j:kJuz~v?k$X=Ԁoʀt|=wqh= s[^nٜ8rp}q[OE7sȌAP,~PRY X2v1m=pe~7w~:gI띙?huvyGkSb=Q`t3}3pB=T[ykx|=olzaFU j5;PQ_q'%I=o;Kp0{NoeQ;}M~ͽ]tV{Ri^տ|qY}~Wn9s8w!1zh1r= 7ŋTZGq|* 8C*Rloi7gm29mKZ#P s*~snTR!@ Bf9Vg)w# }xf\mmn3>-a !IBgD}0CR[`T#"=s͕\Z$I!!'eKDyZ/ZBO(e289|(Mk.R.M۷:` >)Zm &%HQ朚GU^?^l͌~Ɏf>ڳjeN5`]qRI^cBQ$aѐE(Y=yC-Nl ]-mdy:pz3a|4pW]-_tO!ߩ:k"=sq tkҜM̱"Iu6$Ik'n6ZAB޷b.S Sj֜Kynr[Rɍjۓ„ mbDu26]kk|٨WUmRaܚ0JL!N[lv2[Tq@C_SH00M6 BHDJdjG(Yٺ~0z*sF}%Bn@3_6>_,]Q?QuswGi=[qj=[mfbD(nqǎِ\~6Ɛh l|lR$]GnegW (XǓ)BkNʙr^)kg<>Dmfw|`/ Gxi=lMMuZ$&A%-jyǞy "/ =6y|VGU3XWThB-zf[y@^^g+y 묖lj48J@O|dFYjKͱݻs,H v3t FkZЋX{ի]GsqlE\7vYhes¯ӳf9Y/JǓԸ37V-ND9xd.AlHI:~^,rPzҟآ0c4~ȗ`{]wɍ!ԃ-j~~u.Df)zM&_kr GpZ 38usu6sbarZ\V(VP7R.,jk4ĵW5͛%A(=Q.6L3N+ْKDFYJL91t(>r4KGDbT粥gm=}te~|ڼ-R Ms<,ZFΊdךW} y7O7Oo`Cr+F43{GNbO!CBlst]K ʗ-Ae[r*ɳO[%"W`:ڤ49Rʼni<-L?(Gno垎@m n3刘WBg |ln=zoOdũbxA'^ՙË@4DsDctOk/,%lYasm=G7KxJı'A-O<КLߦPGqBlp71R% *Lkkjd%9'=qRwhA,Im14RcV\`.XZQj-Ho6;c-B#RFMɆ|SVG =V;+=)[g '~1_LdG^HCE?NbTZeEJO+Yѫ(G>h>O˫̋dR@ HKͱRmkG* TK\s%Z*s^ͅ&q=Hb8: 0pS8 CJFR&2X&I ΦB(n"%|9ˤK/+ʺ +FruOlkO,K 祲d?ے|*7+ADubbMjB-a,~jrheD|K{טY?~RG6lz1qXUb)PhZUIJI'5cMm֑]6jjՙՖя㱓 <{nn[ūOqva wp܍ ]|B'M`#xK6MܓGۼMv7PʿM;gW{LCxs.+zK*`̑`Nb&Yf}/ͣj•Bq^'Vm& %7{8)s`Ħ]5TjJٵXz%Ԡw彍# F2ٙ4bFҒeA{^aY%\?-Tw;-t{-o&PP"0'+;RIj.8kfQj-ܳ#"&w_:=)04Լꅛ!7TX 3M^L}`0@a Pv9 6X2y6j%ȑi>d  !Ey5͵KsxvW<;Ye 'e;|ar8$bOaajt^5ZO?T5}f&>.afY>&+zK? #=,9hy2-*KZ"8Vj6FTVٳ"1X$`<+.R,0 qX>x,ƅkqGIx(}9/Uw:&/x'xwOMW=rL?' yM+/U޽<~ǔK,*kw9g²b¹om=BNCN9 +K Fz"zڄjKvKv[n[Fy#τG8FCC3]_|W;HasfmTHf)kbm9P$VR`}w(m$Uf8KUHpw ݠX4 !r%R:%E]M&7+E7U0e~3))ǏPg9̧#B}j|RMwREb/s&̫f"<"lW@Y ja!tD/[RQcK3GN@"2 tcOбAgVnǵ蒶0ƚ>MP3x&d ڕr? l(/m=lmWܹ _kAzXMꙍ=J{6 ~ [_Jhw;8Ì#.8!/bn y{~W|f5WCpu@oaaM!o" ~h~kfWsE%d޿ ]s|"݊>Hym3QCU wC=`xˀR ՁqT궋k62V4J:E&uLYŹQ A>T FF Amftpҷ< $pr\d߳| e?ͤ.}V)Qr|N'᠊lmăc|h~?H398߅e0i1A5LQ?]fCOgwH,f)}-|̀ͽK3 4Ct]F>SpV]G)y@2S`T$*Fqe֗ŵl0<egC"*.j BQNLY~jqsY[*fkR 'Rbդ13㾜 kan&cF|Q_o6N{%]Q"vےb I1͵9ŸVpx$`AR*'xꤘpI[y%/f-ef!nJ!`K |kB§*fIܰU,d 9,eLIm+~UxcJ_M+f`;sFbjL"3;w^!G5i $()lRH`08zƴRFc&$ ߅W8:oDzvw{MԃtzC\ @wj1jPN1bhɕ{6mAHyDVQFP)o( qsJ(;)e 2rBS$R:ܣMY```KRF5 @PA /"Ko49 r|]dmQ %Xs[w/cGܥ%~/Fe) 018VI3`+Hvu^7r^q=W*Wi)q y"1%Jp |wĨ.fޕkDuw뛺DM:=?% 1bn-W\2sEl}(MK3Nq4.ء>6"6Vҡ܊B6 ܦ:0L`iX&)WWv]ȭpEyz1hDIH7 (mrCAily3dYe;"+F.OpqDŽYjB[K!bͥc4LAШ12Ns'S NX`DA߇(J@9>F+HȌ 8\bL`7 Гql; jԿ$Bw!'Q(X!0pu3 X(>RGhJ̸Q>* Gs{`˅0*E$*ZlDpDyű N A#& -#heJ @P %p)9bRBiSRB!WTtZzH%nuVG1L7Y~٩Qwq0ISUYYHNp\2]9ga\<Ճ^q}}Tia;w^_6U;woHw(GFTw'!ɮ!}vsI\?xrǐ6ub9Q4xPLM=*?dqVp~zf{P>(tIyw3Ր2eW 08궔HSGsY` O5UƛbmfỎNJHƝOV;R #Ll^UH;eG>cvE;o{R8FP3!1Īi1~KD/ f9L*.׌Ӵ2xәSenE;Nw'[zy^v2+]MR=ihYu &`HFݯ+V!"LjQ$ajT_bz)r]qG~Ξ#\^BM2nBQl2QTdx[|]3S7[W"-&nUCVYqy_|p#/Jy%Rpe"_x[T~NZbEYNjWUK |~0~cZNl+ٺB*u@V3;IB ~]Y+XiRqaYyX9d,Ⱥ4ŝp+qT銦el0,L(lZ]Nŧf0jǬYU9. 
Be"5#_\DRP+ÓZ`]IXLYz`N{3%lqBs(VrDMYya]=G{ |=}oX0_0V0oFd$/e6qM ˉ8'U fI=/}GAok΃ooڣ;epx]jBjW9O&z`L4?ķ>4r)BRkN!+p0ЊۺtLFs?Nam-u!ROZi%t9q:Z$]8lWDM2n < -ӓRxz~9_ ߸vni ̊Oag*!DRtSӄ8H'm瓭&VBSkiËvѨ{:F<-0d1KmI50 'O]D]KhiڦWr hY0ܙa%4+  gi9Q-㴬i/o1oWѲw[ rF\{[_Ա1[Vzէʕ-hݦ\^5οhїj^=p/񸷣N⭆:MV6K9½uZCA@9 eM*l9?y0 xa ]|-)8yh;MbYFF.&oڮT' A'>&]b mM݆ W \y[J'mT+Rats0Zi\PI_Oؗޚ(PyPYV؂^ Eo 2)YR{i4Qa_nPmZ]MuEyb4!m4mPe]sH!H DM̊bnO5[IxxM쏵tf!ƒ@ڽPX$:oLfE5T"iu|>i֪}'`(w-7.AՃ*AqetPL8"SZ r (@x^pb.yʋ sW٨ܤ6=KM2kB֖ fȈ*TaS>!|MۻoYYMS9h秧m];l؞Q).uJeZT4Bţ c$Dv rqZ$Ibn >:rbd- <ʪw-|oݶ=AcE,Z=A6u,&'4S&ϟx21(H){̀eB 1cIhbx˃] TQ`F Z֔L;k5oRSN[m}F qCbF2kUK:2I_SMϞp>[dI|:yQ3׻{4Do5 s󹉊%m D&JeE0N5ɫ›Pbq~l&+̷#pYƘM"Mҿ?."gĝ9Lprzbm*=,ޢ{mn<$eQ#T\^ cJ )!҆ vƇ@-ՠPjeTM6|v)S5:gE:儆j]` 52 $I&%%^X$J"[<!1]m{<VGڜ UgA4 J!Ɣ$Xi$ +Tr,rdƹ/{WW'( :ƼwmDA y_EIīJ&Qt s(XX"d BegXmȤϜ 10ejPH+9YJFNٻFW i20%`*kGv,L&ǒdUd7Q/R>qD%]I?}싲u$ ]!u\RĤeʐ㉲/RY^_YvRh!A;FPb^2Bͺrrxa}dϽVV.>FT)oj*0Mq%Ѕu)A+Ц\Y<]Yz5%`\ZbTF|Tշ]݃(ԍy^?u_~~׃n %z!H vk.*0u0etn]ҳuk,jՈztPrTr 1TGaoC-Q"j^u m9O֧+mGα8/ L-%2>N*e|e&PA(+(X eBhH !,g SAWP-)eT`9+Z6O*$4b2GoT],ܺy8u :ZY$0G siۋܩZ,z)N kqQD =Q=ÅR[^7C./ ,vK,,viNRkoPp(b-}K;deGOE~ͣe/Ci؉jmȑЯuo@rm)2 ɢŪYQmESjii'|p9B֚KP;%\Lj; 468fи@ TcKTeީFnA;jwiH=Zjb2΢e,:טE0M7s+)ƁIKy4EMYIlԪj,ۡ,|q'.fVנ1X b(e!bgsM]' Ye]dOX  8HcunB=3p-q 탉y2+H_/׎]ܳPQ.nWPRGvj}J-ц|r8=E fbʑ`֍VZ2Q{(m2 j}MbV): 1 3U)VM^tDAn+)Vbu,hWRr&jT8ɚ{5=em4 $uK$58YནT&xt9jG1אILtGDt r MrRyӅ3ںN >XW*[ZDxcEUe[]bfV1SoۢvP[RV_Pb<^A (A:v/G. ur}C} $+Y81E-.ڭՉwd>k9Y/NMvJU,9VlЅmB+PR6 tֆ`kT15|F"gD8huܠX|.8_ )+gUMߙW.)Zr@j%bR W+*U*V CRrRl+*0ը$g/?2o.g몌y3zks5ҍ?u*+ExJpeBkb! E`KŐ)Nې\mWL?/5|3+dZ?֜ٱ2!ȠJjOLw1ăEG6r'&IJruWkmKm^֮Lmsa%&ܜ9;7nn#eE/m*]?lVo?cy 98LɌO_[*dB"tU Rwpݑ\c>ݨMn8^~:ow~:~%#tLBKǣrq~:Ough &O /YZvp|ABn5]RT=>zyÎ#%oyo/'~EO3u&w;p3TYڋ!aAw|up^rK1wUr_EMOXVGuO<4˯/wX߼XJ7/hs 2upx0+ q`j|eQ+!,4|h|%JnodM-I2&աǛkysgSz;&d&Sp j,6m\+&`ys89YRdrz8ݎ!K৙V6IH~n>Y0͓[kiLcާ Y-, *Y4d٩|Եdm,ycMbR@fm.EߵYY}_h {} oF8:} )8tZ'.Q0dm"v6 "`c) aQ MkzM+2kgkْPlfCƸD+rTXڂN?onW.^~s__<%]<2ogYn鴯f֫#>󋃯wߋ=[Cm@p=~z8;뷇'l~},W%\oU}TXXLYΜD@B58 ^gK2S#m7?V%oulmb8 XTllc;D%hl NJ+̺^Y5Z-/ü/)Mv?}cԛsגt։`sE|`l9E"yۄ֓e&A6i)|}0B^=VL~ Ms?7~ZO8k=-fx⹙yx y$4cTjӏ_"X _(ܠ菉S9볭"]lw>|3DT ]PjHƻ'y>3C[|Qbɳ*Ƥ*ǼO1'1k3ܬfQr;$]7#Xۿu|( ܎hG'w#S_7uU U| i1کBbFC=a!i%$k'r)s;[bZ4$Ff#b0j+ dMnv4)@NDN,D4txkY|&s%O,0EWM GCܸ77^w@YzaljM7Em$Wn_E7؇/E 7Y,ޱ-G|ml˶FemI0MK&هUSKg[ b2A\|҅iS8XЇVp~T"Ή1(ջTb=SZe¨,qTZQf/94iT :H>qMJr0TY~BN%c{r[Άtۖ,{D1keLс":]d8Sѱu2Fy[Q;jX0VV%']\@ur%fV{.钉Nz 9Xw6W]8Lh]{v;|ZxcMF97_م Ůe)KjwSo{&wd:2q=?` prjU-"4yK1ʟV ?3@gzh7j<9z^Ό}qlq((3'@}\qCh쳩[*UCU1ޗ` @3j.)X[Btj$ (x0E!袩VYflS{ڋ+]Nhws5~nwKmwݻk=y{5]9b)ݜuUğn% 7sL@l68mU6|oM2as;}oZ;(wYlO?-.Ǧib's/YdleƹuN/ꮿ?sݔ/ |$"VͺI'2~?o.HJFiDzTpN&gTX?t%ǗƸ؜:?_g~4~\7-zH}HF [-w:H!{ 6i<:gm*7L&d$`Qһ%&3 `~eE:kVocLa Y !f &ҼM[dM+WrǟW[q^QZ{ kҮ Ujs-c9g'EHaVFvc11K8+(g6\o.IoA%/ZmY/H UB[bW6)d3<IG\u3%]}Φ؄|JDXy ;-)3 )&yẁ'CpWQJ2qYa=G%@UB::K b4$Y6:{i%EG8w%r װP\}<;o逾փ/RN+W:|-4 G P/NP23,>垅T㘻VoaiC9TE@V6LPizA}AjM,GX 0M1@T|H=D=^5LH:!8h)}>0AbХ/:ށ=F!U>N>:>ᴿ d,dd!e1\HFCF hRk28_EIiϳχP uˈM3>'˩&.@P5s3>\Q56.1gƠ9h'iFA s ӈKS(56'hoh;N6lU +D9t^7E cyi{-"1)$(ݤ0(UZCY!(BzsKg8: ό*ksPrɕ/ hT9dB1Tj>R[ ˆATԧ-Æf9§MFMM.X tD6!9utd<AF,S1$]l1ECKN_VT&Vk< 3Y; ЏAU4ZުեlfƳ XIf3 :AQ% 1"ъ glpt.3a$b0JaaJuԈA0Ydrxtгm{X%`ͦLQPBtUI]p8DaJap`MGS=c)J&vePġx,AU4@0$rf+-yF@j%U?ڰİ˹ЯtG鬜\+k|V!?~P_vy8?q|)r0;\`NW ϣ~~5+U:9Y'zv#хzS[Htxnfdތҹs&-j)~8Ϋ|eNiչ{1VNK3#c[*^Ӿy@"{T &)MM 6&LV9;-LJ uU)^mnU嗇zg3-yaZCӏ0Yᇈ&PB߳gLG+>2x$cٚlXf,[֬3##܂/4U2PEB57Qa=Bg`*aϩHU&PEu*ܽWuډ9(BP*bJE61`j؉Vkl9%RD-\ɚ{LAV枸˩-5Hï\!\z^kO6hYLvѤ+$tZ[LM w TVY* A;r7e*TMde30kѢ@ӶTj2 QaSNAW4Y>֒1{M/*VX!{0JC#il9r% G)f0Wŷ99mm!ƈ>)+A h3ItqK Rhm(\Y'}mjoD­ S*Dq ۩ 0 EATW`&CkK2Ulf*5*1C-RI,YÏ0}]YA<|c jUZ9\rdOݒ QHqg¡d(,뮬Ձ̏Iڻ^u}WUXBt 
A698m{exsv3MFbt[(P>D8Yް>XtaP[mJ/lHF޼%#[>,1+z颜hZ,@KR}KIwqp#M~4yIjt#EyHo ɣ{KuK kn *rH1ycYGXBP[eA59#ވ|xK|8 /P]dmJ7gYU]%>[2NSP#a~#lS_jx@`Q?Uu֜Ū2(-時 6XZ<湉ok/WΥF%ec=|Ѝ } Drg>q0hkW.1>֖P=s[o 3}`5 A ra7 \1%D^qΙj%QM-*fd5oÅ10l_rC9Xy_·@IkoSؤ0IcBGjUe#'SL!l0;5\Q:j 5eJ4U[) <.ޖ ^{h\˯yn19$LۚROX b8h]ш]yXzS{HжDLqxHyL> K?ZT^KcWCV*%/7tVjrΎ.U]{oIr*Jկ`w,|OKkIʲ}wO)(P4`9]]]GWuf*Br "c<@Jep^iJ ʓ(*X0ƧZ(!Hӌ&mYpvÅ9f>?6yu;*Wњr MRn.sV<%9y&JF݌;-AGMnc9oFbr3jcM&'}V)+U=:6yoTfaչib|>QiI]T={DYe痢߈1+4Qj)P$oh#(KŤJ0 /.ꏃTt;ѫq6ݻ|׼ZׄI#qƾ)Rt6Ɗ>͛ܪƜqƐf&ɗa"_iGH3/Әb _d5: h?̟ 8V|_Bͫm}Y~{G)JVrHL"(fKe!hl`Z>e]ܡ2)^^gGLt7c<^ۻ0tn߯EÙ,~ "P" T`3ڋG2,Z2϶6$Ht4F,o2fD>p܉y" WHmo~P'e|rnuP`"TFEp[f|Q tst"$^ ]Qw)%o$e,>2 o&IF03ͭe>G סX#@6Ytw7>sG6}ΒbT\ X-6M'  !IwQ;&߳ZKV,N0m@*mA==N e݁gKCX-H0}e0afq\!mu $s[(b\T\p"E9'P[w.COև|d&2>?u~NW#}^R{ >2حKުgzl%;b O呆gO]L11vlR ]DeA sJf}5б"|yu#; T!E!:j/ыb[jHH#1(襉Nrϵ0:\Y=iDhc#^^?G1$$8u#i=y;L0%#D?+{gDhv|7 KoLk1x?V mqvpӜ{d&SG?`V-hE>($VtYf9y,//$KnG Io{`eBjYº;ϼʞibpJ-̰66g2"ۍ*/]CR`q):L[=_{m.ܶ\]<@Rz&$)`C89&.0D׎7yIu5҆%r=Kag%w*5_da`ٚbqz$h3b!㧩mðVH3~vˬ-Y7h8ӑkjPXW[/eK,y[Pg|"NY{5T"R%/>C7>~V,εt al:v,MIApY 6?}j7s⎍ ָ"Rбn7H7CKqK Kv>Ö8{9tFV3A0 qtڸL\-\Sd(axüIZ*> j[\ĥΐH#JɐĬ**:fQXug:yńKՄV_ }d%nANڑ'UdNo^soBYv~YNOɆN1o 6=0Z-`y[M &GZ$Ӊ c^V'7ke9F`P˨hBA#PJ6Zmmn` *h=Kh>i!NHVX̧Y5Zsm8C?Sʸ^犠iI*]1ZH#KYDJHM3%RN4p,v_{[Ǿ0ְm CLwݾ m{jK; ?5ǗlDbFoL҈u2tƘ ZɭS%"h1ٚd\ FNK#8U+D~)F@4gm0R:%sj1W*[QȁgG=7ojM|ٳVi ټܼʅ+4^ف^A[\nW~=9 }cVZU>KF>M-UkupO;@;dc%3t`fH(bvNI7g/4@9 `ɕ\i(EĐxTX5Jp猏蹷s$A, WLOk}>;=\<([L++jOu--5؄\F=[& fCyuqI$' DʃQ9sxN 2:1 E(#/.[oTtydhJT%huT"e-ANb8UmA>;F M59=8 SC=tMi<q#Ԇr h sHe"%z&[r_Ʒ{udGJRAt+śEI yLf2Re r4RLX $-FÃ26uH@!&'5 N HIgljImSͪ׶ 6 lEmllt\ޏ϶;PAeiu\Ì9c#+V\(Gh3whq.E/q2E*}N0wa` W` . B^sk O$B :D9 4&Z]JUqʁh#3x,eؠy? ( >4K}(-&ߐGt/6b6**bTbIUǐEWE#}6C!p=yy./TXݶcݝDTd)q빲)FL!{4F>jn??}2{UޔY(!MnsUs@LD|ud':z$사:v3O[V?sS;cIq̮Lq\~r4Woγ8wa %2q, =+ԍ;UP]oIrWF/ٻK8-.8,abL\ O%QDI15V:"B .L{3x0F/8L`YtNӑ$FY rD{nL@F% A @ 0I#i BK )SBTbFXtcB.t+2ڭ{V#0 J ,[,% cA6iJ%(UQEngxsݬK,I ,I36dlvٔ$p:% J/CO/;P1kWZ Z4N5DLG 1D ƍDSCss]\&0*f6hP 8Z.|LR]}Ԟξ>Ko`;q*.۴΃)WSz:z|M|A6;6d?VRhmHxƣl:/jﴦJ)c)ʟ ~kf^Q 5:-fx. }*j7Jj(xD24>USp-_k$!Jf]N4w&G@nyÒXˋGKy,ӱyV}[+9aj+ܑgմ#'}-_q_s9TcYȶjAVK4i.c`8FOx32:8t1G9I ͵FKJ=7 L۫ Uu7Ae)j4O' {eҌ`|Q˿.Nf tN7CsOtadk7"7X:EA|Ѹ|)C 2c?YQNQOS2YȢ.sdF鸷*oï4 azl<~eWw?J$]to䇊ŘUBxG#RG΃PQGˬG:d&"c KxS7g6Nۓ"XvAdd2:nek2nQC{)NfG-x;Xjّ}ȧهT)>dg#xVd, JF(1"-6:HH Y2a:{"rM&-hS-cVhÝqQ+oZ6"zrdeՓvWgSFxTnplșBI#73*+B]&p/,x+dlf*+Dϓf1Lk QRFj `4>}l 2n0j] ~_"e;,DX΀x'_p6=jS;n#?ՋY7mLܫ3=Wメl_-BC^B(3 TX2$ $2Ȝrf]b8ڂdw{6Xi5BO[a d~s3!'B-BQZO,IJ_7ٔo 8w'=z8L\9=gƚ!-?K$š OƱ: r-=u@ݽ"%uHuW<&Իo ه7:(lrQ c҉ 3y[E@p:Akܢܬ6j}ud. gMfXURH;W/>l&O</.(JVI]e nإEQ\Aoe)'{ݯB`o󖈼Rf";ry1B:P"/XD^]1\\ &*W( I]̃:x#`O4\<tZZC` WQ#: ">j"$9r8JCRHھ 3⤴&tB́玕 1$вyyF~W-k%^ƯXB>Y˻鴪kOFÏiat>TIeV߿_|*q\6}W] Yo/ޫm۷Ogc]GAHi\5|,. 
".MGp3`Ż1uXcqZU{[RVTnU}aB;H MXֺ9xxUpZd@ yN9FFdF̪\%۸4cT&yNژOU3+;2AKzWO?wߋ2DvԛL T&g7A+T%EEv{ŋN<6>dmzɽD\G"7dV'S6'+妮:)l0„ޭԙEVd&e)MR6e*~9AZ"-׭_e~>- wN!w Xva% 萓.`9( L7F8"x/@QJGzun>'C_3+yo4E.7oQKPGCHQwFH͟!mrՄyftnL>>F4<北3c*\8oACCq&@=q3Ƙ̹&VD򠑺$3> {Ӱ6Þf3 IX 8~ _L(F60-֨hb%H0YFHuY[E]znۊe%K+ 䗈`4[=)*B8<oC7WOuF֠$#9q!"xcS^.i% Pȍ(dS~wH&ԁ#TU`d-=]iRIӣ~ $aiuіK5-l4霊 HCv^#kK8!9TVqAmv!H a%wDPfe,ʢʂjνv/ubc Xu@9|{Ju@>\LsJ~ Bl-peQ(efGQx!TNy3r& }G!R~-1̒pcF'}s4)q~,*ڛH2F=FK}\2l9)_ *a>q=Ah)&ܘNF XIFGsUl7<)ia$yPlGLcej:7+ MgA =J1tВ5KAPN 49RozҧCTbd3 AQBk܃:B G.(g%gOtyz]\lǰQBBDoU+G&kxc㚣(WɠcD}`, #H0nFk̂--`4%[kgTg]G.`RQlx<d1*njL4GB2v#h\K>/ylbf/ԨDF)"2@+`TBCK$yB|n:@&I(0źmn6ŽQ ^gք+KV;u+fSFJJdqtj>>Kc{,+S2'vp8]<^y(=Qx H[ሿ>NmnS O;G~>:9qx2[Ȫ7u*+pO6%|~=>ߕʼA4TKZr<8FV\-O*_S^ ]OR?_z']/9}`җ`)<Kuor/>LJSbާ:ֽ:w)-NYakn­A0s:z$ʝ / vSqzWa}~ks2 W~3';tp ` R^R;uzX6w)R{}ϾSwβ=ry m=kڤP0u/-T"qL-k(Maٶ{}r̓^:k<<.>/^KuCùkA= dC1V{1t-A5We.^ ;/mmPl[+B_j1W ReT"9w`5lù=zgľ!>q{tN'EH n]5s`0e_!r5)trHl캗ƙwkw]mO'ksL(8(rԐŵ`5NAD֫92o2\Ϟ9Y#F**BA,A- ({<$6;J$9_ =Pr65tn1,6B'~y1I$>}NBd+Ds34uQE*G] ۔"=+EfgU6L<Mdt)$ʥ>1 sEX5ơ$J Q-2rEÁ:C_sQDgV{1Vrbg{"GiԉU>FK6Ir=@@HA `Ce~E;fjGY;E5]r]kmqpldS2$TO?6[H旿C?yєЁ`b0 ˠI#4K';WKM^B$<Taޝ\UΔ4Vl}`2\G.)Z3ڍ✘X]XQ.0df@,LjPl:tF/y+Y,@z,5z6NJUvsЂA9o~iN2p;_g̶CkKADѕD}N .4ҼK'ZmO۱dN~gy p\YpcnE֊scNm%m\E>gY<$Ιs ^L_*! xS -8e5yfP P풤PAMcy+R9Z)wr#+fA5+xW-eK%+'/ݕ 7qzn|Ckvכm8w<$=#o9 iUc.1Ƹb n{GwSh5Z7ISlJ f{=JA+:lߓDYBװmp, ]WkBE=<0g/^T۠fCJjDjosZUD/xs&Tuy;8|IޅFYTW"έ# vϦI"ZmiVp ,6fO!&ŕ8fj3'kAxVadV}Ȳcɩ`h$5@$NrE-\VGklù]+H#s8hlP͈jF;_E=kB( i9JF]Hrc;Y5d=G@J'og(pyI¡R4ШaAu/f1QsPCu̬A,RP| [Db)Ƥ\Rs3"S΅'󒃢eJQd5Y߄g߳! 7{aVzW Z4wRlùCG7O!GЈ9EIɃ4f5DwhÃ5:ۨPն6UZ{@Γu*ǦhCPuEC6!F3撎jFY3}UO%#bHGw mGs:U?Z AlF>HY_ 8{"Ov@c-Z]l)P >bj$=$G5ZnX)\=E`B9T´AG4d[:,?jf80pnj3,:Uqtq[r\Mu}ؙ9Q4_>18I#V\4 V )$lV*^|kMF.zYͦLYYdV@= P.]gZ˴.Tg]% rrlfLr3B9hbj{S=;l$Wz窖S2viUuԄ_ S{*V9@e /d^[ᙎ |=Zsg|s;OG7 k)77(jJ517Z|9U}F߄y@KfѽJ6>ۦ,^4ߎn` ;Y)V=>ʊ[>rZ(sT{ͭd~P>Gk41b~||{o k'\^ r 8hôBV~>kyiO3J3.K3X5j!L%cɥ(),!>I zlɷ}k|7Bםߔ-檑HE?ha9E􈵥 բ \Xzod>MJ}jTѫtLV$)L75#9 d]$ID4ڇH/mgG`( Tu4`ǸmVm8}qduVᢣ "q,MħY=} }yx/ 9Fֻ }w{~ 4ǫ 60m)ʚZLdZ~LeYȎ z9G|6&WMO1q}9c-Y،&S&ͩA{Iv"6 f:c6m.㐼Lf__@Qi=}+ouG}{|nlӫC9ۻ^oǯ5y0{wHL*[ ܦRiz}O~'߯nV)<_p[ `?XB06BpU Z⪅$ڱ1X};=UlmwKh+'aIk@x@کG;?I(J!(\SL9ZC]RvR ت]&h$ݞ50~r՞ek(`U9OO0XE )K'uJ0a鞞WlLk>ܥjbGU}..4L-Uۣ@Fk=2Q/ KS:XVF<#헁UiEyߣ}W׭}:XE obM R+4_W/w P|G5\y-.[:Ct`t"0{WVQ%{Tr\2$Cu2Ҁ>*YHڒm&芟H*֍LRr]n̨3Fvagsu"R[p%VaҬK9~s( : |JF|y\lDXbD1bdxdڡ@+K(_Ty1^\_m嘌EuO Q7vڤHM̹ vZn]ކتJ:"O T|F'|FU,C 3*##H0v`vӌOyVSIJˑra\<kEFR W+ѺxLBҏ#Zz =8aQJ.)U𚭓 EJYb>n*[Tʖ96o1+b.[I/OoH;n-meT5utɵcxA[P|*1m{RێIn`_&OfIXe3Nj0_Y)Y j>YG1u6c4=ٹM02yY#-k}(5H/O'kl(Rpd<㫎?'ዷ]͈22+/B? ݍ49F="@DpPI'.ɐ-gs"[ҰʔZ&Riьw:B|lF'✠/F>9:-wJ є(AՄ/$A{0W//#"9Dx.8aS1Q*%:k.SX1T}#QQdv -w, sBĐZ{%;&Ul6LRl&4j#BeC6fK \tږP[LŰhd79TĶzUKJWr5Z|{cCiުfs؇` 'ѱ+/odxǁ7 AI:,RPdFstFcL%i튕A^bi(emFN^Pi({(hwQ>llm)GZpӐ$HUR-)l@QEC(ZRr./TLAJo&;a/=Ci))ZʆO?.ժӃm]aÏuz98`I@@R`PEll>ckꈪ#p<@fG}PGSQB͕*%h| &e%R9H}q;vM>o'yOY{KvpĔ^~y @"G9rJKYlNQKlFܳh@~բՋ-ZX! .Զh@𗤌Az ҙDJR!YLUʍKrELɬX7(:a`Q;t6R*`p+j2}wn/ ҁ-"xuپkdG/'/k8G8{k!oxP{&BMÿ́Vd`3zQWB cM6|GФBj2! 2'dAVlԒdC$_bW|$H7&EkжNlD7{B77;!p:<i󭳫Tu%~;ɢ`o $O :h99X)d ! 
$#t&ؔB)H!68 ѓBԎe[=4="np XisYplJ%2>$) %(1V55dJ՚H :\\2 AN-[TTUUY0p*#*MDQJKNh v|%M6s~ (v=Gpۇ՛7vgBL|ROVduTv trr79ݤ'7<9+?\I=iwק7!*$_ϼp؋H1"`crkg캋tl!ILtĐ~ wmm%G~Jݷcd$@0ξ0-z$&)y_Ë(Y(C$_drԗˌ(Y |͠z ك q{Gt&" S%аKViM# +1[؆XLWF /^;߬ =ez%8ߌx)g/kK'rVҌuö]hXhfcX[@ ,x2Kh^yh,K6o6FPM> .[4.5OJw)O[VoaRזdy}=;!IFGDfGv;/ڋ&pkTX %*ھrn;`:]giD!K.W4FZ:X D'lJ'Q׻<[<¶뜆m)ޛIpĐ)lЩ T:{4kyASҏMНq徺w_{lA\Q30]˥CAU-0e;/o`'d0E >Ǖ,"N^` 0œ t6H,?tXGf}, FZ&S*;/2x&ppD\2(THlr4xsQW *.J PQ9\XΆt,})լK^I bu '#DcX3좛PƃHR,3RQsW ut:ȔAPK"{JY1:MViY+uMiHFJr[V'Yݖ/#((N*i>8l|oIIK6%{)٥}$6Ez *Z!8`V'S}@z oS:6酔!% :ؠu29FUBI&z$EÈ;iuIvyʑx,TvmeE䕖 #*f.A2et!Y23!+[`u>{)>( =Pq49[T l'F`09%"KBDP"/yD{;66-5vDc\f u[uiA^Mi5-w2@8=pO꩘M FgR6z2h'{=͵ 焖?~'K߰ŧ68lƅq-{WXQbu_}=X2v@I62ZյTŲe`_r~uQZu98pm`ڿX+17/m>{z"Oj %+9&F1[bNQτ7/O_"x.z{< , UJB&0]"J/#bd:X]#pv6u?*;%3BI!30UCy-%-`|d;оVD&&y,Ces^9R/}&mZt{)w LZ˨B $=(YJ(*#tk(4Js½z˔zvs$vc؛! Ç-gk7B7b3m]qn>̻2E185sW'2a'\b(Q#O$yʎtwqv~w(;ǔOA zGBbBr->BkJ;W}}RMOu@M٢Y 0Mⅹ%%CҒMϫ@N Q˹I8NBC9d͵ WY`1y1]B>DP;qc:'"G"~#z>Cqb?0Jqz1y)J:P?"4(Gዟ0./~άK58l>cz?=6Rh\+XH67d<ƒj`p2ߖhqrݸ漾%S:uVϋq9(w~0t`)V&t4[5Q,BiHM.Eyn1o4j~9! d:S3][D͐+(F)*1Bo=@ـ5{2؊ѷCmsW&o.p ֙mк#$+e`Bz d.q"W2\֘O_]]e}g}j@j'zHn[*:ape3=st3o[҂3(zVcQgjRK`ݖN < `l߾5mꈞ *V=` Y R FJ; u֠t&&3k_e񜩜"KPznAٮ&} z%TvpmP`]2|CJ%Y%'V@$B=jyk GªZ0YP3$S8jY|߆*..׳_2&+|43^TVZ+,wk8B1.Q\&v,C)0 l̾M8Z[o B"JypD+>k6]_\)1-{@R:`A6__ίo,|:K!_GS*Sjʈ=cFL̓MN[],;請Vq5W]Mv5{'?ƌ3hjOq4<}gm u4.-O>wKɵR]m>9mΣ$˩?msڤN.[=޳soVuz`.hٚb}U ݗqchEg,dY6'ǍϺvA~ٵޡ`l6?gCH49 /^K`}o;;nkY.Ժ\-pkY뼸TױJrӃUO_gl[^gRa D QދQ0M|* XW>D-:l't )A$z$z feEd@[ ȅ"e2Jxz{Rvж4kjZ=oN k 8[RJڥ\tA&Wz\{X\ПE$lA0v1 EݲF俧(ɒdپl͕?X"yuX2IRkwx[,o(5z*'*c͂]_8F)Ѱt@YtE&)D! T%uOY9binbi6XSj$+RE>If]R Ҡ ȬL*Gi,M1Z**+M $Fkotk, ,Y0^+Rb[j+N>w*3W#,9*k)QVJd8g{$9Lw[wDǹ_[B~w/WZU/2>JTrs_5qWDN{n  /Y[`ʞ1\v!ꊦd6>d !y,` J%L=N#TH`0zCj"dօ.AG˒ sP .:gGMŬ!!׻ZyKz:Av'^ˮؼ}5m_az (/;w9#|s&HӥcH9f//lqYrmq==5:x65xA9}P3팏Pjk'Ë{cz'zzYk>J?~.h(:H [6sX=E^A_dZ̿ 3gȒҗqKv6f;zC**#*3[*[+ٖV(c*f`Q6`IIEbd<ب5f\rîQN;ųB RLuΡ>RGjIgGцsf|-[1οr_HЮy49gVa/'w_s"bz &Ƣf:F{2""luȔaxCu}#Z*oKe8 ,XJ[ZdLYuY{i{wx;Rlwc=s=))r&`vUls'3m*0j(NG̦:X@ @Qu`DIULEյl*r kOh%P{"Vtr,MwO̗ zfw`*~uU%ދ6$b˩efa=>@9^qZ#j>UAR25EKAvJr1dYȥ>ɱsبafkLO5C5ҹΩƘ{n^u\pl` `zuk!t.JDlQKH.yWBP*G+$a^-Qx b2΂YXt1dZ4$[KΡvL1䩕%*Mm46j[ S=Q/};q1*UB퀕Zt*IuaH**̻VJ}nB{>c/_MZH\IUrE)!:gA\.NbIZM.Ɣ#,<5Zi@h/bzEB2LKzR6vQH뜒 ¦D*dER ;2UA]҄VhSjnbP`芊,&K4ڕjT9Ӌv:Vku.Jt"0=IItkXW~ι8;<\<(kL++BZh\׍b: !틠h;AzC}@ ƣYʗn<]Zhxʨo`>1YGYvAV(9bJϦ.Xk˔ EXSZ|UbR6vfOTx_@[S h!Vy:Y( A;vt8[V]]L/*o_t5 2pd)+t-`dYBMZ )`L`bNTbVg@3_AEQjQqO_HmHW՘e5ۭjDliJҚDU>..XI9&RTHl=XRݤS2OVM|gHR,JlV iGHD[Cޢsh"EW~q_j&RTX|=Sp< G*8ay_GSoԇ⻙58lzſݹ"]ِW`{.[ט*Թ~A ӟN/^ _(;oo9f:gMZlOV7؃p[B'oBMX RLf>>/79/<K!A^֭+-z'mAn ݃[}3ĩlUHI N% &8MTΦ[+x1Sz&5S}{AfBvX'i-Y* lP>.Ѳ2i)*( e)xDz\' ]^^̪~W8U/u 6:"5zp;϶B+Fuհ!a3* Pv [ W@n71aˡZyf%O-T]>JzHE Q4ܒp/-F6"ȞHƩt:.>w3 #.'ge,%9PHبEhI׾ܡ@Zz IzTј s;[ҚuP6UgcIbMM6#MS^?Kr$'H*.%7}$cU|:_H9i8k-YZ@UDWM 5},C7ޑ<;1e3E螞olnw|g`wuyvXL2~LҰ/4lR,B?Ҭ0] wc;?_K~7U:VD*?}{@|]Xn(6T; ]bK8 8&gs9Gl*'7f1 cnv*-'_~::=^|ru0\^LU/_|왏[p5Lg+l(yht$᫩YI!.*spl *^׮´+ms絙}t5i2Oc3ҏ3oGnϣ@\NH0TYdڹF OEUHE.1ly\ʺ*)* `>54zr^ *" R[UAJ6YP.cѼLTǧq>׹sCn~dzj¦V|SWؿq6ΐ:wuH΋uu_V _ٝm}W8bLt) /6wQ]1ws}hȟq3# I2Em!,bd!0%ɢAi ~߱hA Jh[AM+b>}1jsA07=%9]v }z0KU: "VEMOsl;jy[*DnY- wn/YM^Ӯ.Xkd^"A69Fe: L#) mfcHQcL`@JA^%U8ITmi"`fDҲG-fG]dGiH{@&Sz%(m?N$㐤l{4vb[b\uv^k뜘÷cb¢En,6{\%qONr2]r74Q_,uL:fڿTCv$oQ|:nc$č#f^I)֨^&Xp@j3=J|HӚQh|v*GENso2O-교6zt#g7eZt>J -hv,7n#NGe >M@I]19 INUQ4I(GGvoFN%gLd)dxq䲽ae52D4~<@]Ћ̸mzALJ?n?*>zzi1N?@2[E   *ۤRP2t^a[jJ-,M?ԅჭzu, JSBXlNc+XCB9VB&y<ɕyaMA^\D]`caFMM :3E(mDY tҩ 񥠵$Ӑ" hZ#OO`GH v4dl%^+b7ݦ:Puݦh]~YFm_~f *xeL-mOYJz29*z ]TEѧ|zK-8GfOb0 `Ylʏޔ6ϒ&J]hbZ%B[)B6 H4M4µص_7|/![ s gE.F `TDdjJP>gA*ҰYBtdm[9O 
*L_*)Sd[7#v"u:0Xs$K?ёhsF4eJG xe$Ŭ&spu%;L#>@ePnݿMnL' D_X'"MZB'tͿ_fpzv/|.l_'l2;)|Ӳd{{ޯMZ7V4YKokv~?b/%Οsŷ/}v[-s{{z#Ksfm?j'w=%h߳>;j³-OtMb8'7&{2@K8O4O͝]#g9K&Ozy+'-zN.ϸ4ƭO٩8>/'ܟ2n)?~Àޫ0śn,w>H'sh(BcۀAI<'?.>5dCu&@&+N; >!uKX]fmTsM:XN>~\4*^Ew*6J>yM~|7 ~8JOZPp;\P XMѺxgcS5eЮV̤0Jy&:Y yP+sdT5sPk -P%ChUj"%wz:ul|3Tg%iK2S5Iw}Z ڍbJr#1P*/_Kyim[.@%_]eY> !%lmXȞL$2HOXƲ>gCpnD.hX\ Lʥ*s*E`̠׉3;*r F8!8^sÜ[Lؗ%e$~'[Ń6F7Axv пR{R藻OOkIIYF9*"LH/IDPOV90{L& 6F5,^Dѳ5| KZK+Ktt=P(ל:17 dO0 P# < lGhىu}H ,rQ؏:}܇e Cdd!# y;& +@~gժfaz~Rn!bbr1[q=;>T=k7֌'ac@䧡$`5و놧w\^LZbThuRFY T*Z{]t0nJB N/7GteώUQslN$V#~%MD~AgWb-U/6e9 AMQ H?9"iEBG"SC%o1JHxKe F%h% EgCQKz'enYRU2:%UkwQv@RQ<:YaI٬jʖ\9"թV&ʀKʕ .gV@7"j'jzvؤ[mVN[V'Nt #eS9e*R;ٓ$g:(PfI.9a܎j7B %k:8`,\1Ik<SoA #QT%mFR!D: !"i-|Mo4 GK!==2oNv!}ȼ͍ `-n[^v>905bQG)˸W}{/7Y%퍇h/Z) :צoe` + CU- "Qp ii]3;‹r=:is;0.DSB&''N@hRdYmT4$a]$lU[㓃ڍ&MzƅZі7ɿ=ַĿheiWĔ ʪ vY[gIbvç\Jܒ 2ιf;XeK3#SzK;M{D69Rr-4=d2gX01B:mw&gw\gL(cM(ÁQyvhw",hnzV[(7cLq \5:e 0&hg"ڙv^6΍~f[,;c*yġCF&4P029_B*qЉġNOšs̡S =kB(buXq*EE75F"` vGGX9D0 ΃w~j\ bLXrns{ȥgޥ>4hZ;p]4)XA!7vTA ,' &AϮHFgV}UTY,TJdducPS*5}d2Dk0Y\%ׂOcv*Um١5U-IngEI%$HpAMdKUGm2'V*/ Dc(I_MШlLR7DǻITd e.7+9҉WQYle.ǜS%ugM&rFa)In8}3&B8k`+3 J1F}TlcI@C> sQn"r_prJqUJ+KKvI~-)un.\F1(˄I:@E;I}#t֎LA1I%/O5,Z$ڑXT}(>oMtVà `!gZu<,KffK2<6!:(Me8\vX]oߑE&ܼ&,#Ҷ[^m  $5!j̓K;,{; {nn %LP19R+T"^JPsWP+́T3&TH*jٵ刈WC^vFKiVc._`m%!cYI+)`_H*B%5MhTlW= U8[]RLGgJZV̄jB5saVUtYtAkֵz4@I`U& i(#'Rn*O[⅞Wf֟R;2 5QsA%UF_SH7Zew,া]"M<Xt+'ƶ Ypglr(߫NS%ba.RIu#dzտi)FV'BӤ4'J1M8UJN.hAwrΖbMƖuM$ ,>dX U/kH4` ,R G4qZ`d[F1?vvPn0~,|pdSǹ8Z̾y~ʻLkaj$e#~dϷ1dџ?nZ~y?(脈, F7/OYgώ>~8z }·-Jbĩ,G'B;)|ɼUǏ :/dOTlV\e%>66\-TF> ]`rw2 ^ ny|rSKuoi~>ӻw}7:</m -<ӻ%7Wۭs2t ڗ>ӛq*ؓ2&|kkߓKɜkK/NaupnQYST+Nn=ڙt C+.i)T ۍKV-Ԁ^ E=D8 B@-t7`I<+ Q{ֲqG99 bIJ9cHb ޒ (޺kog5bگⵅ0aeK8G^TbN}%*1X{r+)%{vO1d{QwOm=-0<'ho Xr&o ŁzsUjlh P%޳vo/#6{Y67c&{U 5hQOlC9hr6>U3+@uRfkFJѨ} !퓋hu X1'Pz묃EAMQJ;cDa֚R^Y1HUlB|gم4'Bt!gbMRj)z9ǒBpX|$]:V`a`u1C6;OdPnTρʶ9wn"qn [ǖxxɽxďaO^$J]M{IPcboNo暉<;P%1XU_q14\ah+lUYmj\htXr$Uzg{H۹]GnZM2h;k_:RRU(0 ` G[:\$6VT99j`(-ˎ'-懁d.$; lHq{F, .胵ܮYv_ݤ㾚jށWIs,>{_B̖Vj>MJnnغuՌڒjp`Ucpfb3a1F#lV},E۹]3F0Z~,TF7nκ9eesj,*+Hyy1.9!oA]P/ny,*⟃< *7 yBjI|Z␷AvV'hoqgxϹi_HՂk c&ࡂ!D~_vtՕSyL|^RG[zfF?f*g"ih3_/n|ѿ~WyjbяL#yq"Q|q|*ǟKf؏˯׋[2DKIm59y2s쵪.} .O;|~}7z~y., CP  (,b QR&VQi`! $Fc@Ζȼ1(7l۽MyBbC/.aE<;bTui9STS\b<M@O^.fq%,(FQ}/烀>HitTXX%:P!S*RC`PX~R{3du|C@QOyh_ Gxjmxa DrJ]:j$B-qU2ŤI28swb9|(+}_x0Mr\\܊3pf|za!F,']|8sSu1(uvnd.sUjBd n2櫥ГE7Z䵘Brs 7`НpC]X=DI%㻻aw+,s;ڐ1n1u9+DF՟5 Q3FHޤf3lmJƨBR:l"5:D`pJ4l;U* l_UH֪j\o}/Ͽ^͏ڗ<`RSqbHˠɇfҳ^b,fԻjTUC hU͉fv6%Q޶AvN&mBa$ZVd)5f8}0Ef!N{qa?ƀzr4m(s"NI狌\RkʴD D#epAr]MrId%$EtH6w(T0lƢbaYԍ@BqQ\v2K1z-ek) M X[,?8wV$|\}t2dYE8=?޽G1;6rD+jRT,VS6r|ku\׋ruyzq{s?pQO @ƕVz`jyrrq-uϤu 4mQd YA99X>@R׍5a)i0Y;\c/&n;WΉ" ٹ>T}s_̢-Lmamag'~!nE[]Ϡi[Tr 58IN~>ܯ1w|]NgϯD=/m \\jUPV\*x6{K}Y^(f7h8`;zkqz qo:m?kudS t' 94'A'P]&)G~ ceQնaWs1>O O,]+2z3VHN7yާlXԧ% M4?]hXHr$bm (C(hTF _BֿIg.4ђ~i|}w/`[3_jΟYSϠ,? S jÝh[j7j>)2qZk)f4BHM\!j]wٲn Gl$g$y e(ZPiϹ"꽗D)3vQ'f{) =&^UZ=j3-/B?x-Mkᠠ`.MucXeS&30$@M]0-L\\z?yq'k"z'6{Q=#PrP"MmFK q9Qxa,Sʈ\3 SŊC/,SS%tr޺d9r pQWqEƌ:QSHxokՑs-mF |jlr8)y jz᥷hAX+'ø/p.l~l*%YFO Lz?'c' :2, ggjvR=`fxYzB8Yp?Jjt`o&zOݏ;Fj1mLpWtaBlc0 #"J[qX[H5MWexrg}:xL,cO,#"ظ3(za-~/cAf5\ IK&v]õia ucll%W)w6)v\J/0{QOc Xwci3 (~(Wfm;A f$۴`$ᒓ xJ!Z3irA렃ɜi:Zq:LM< vb%Sxw/,(@W٦O_6xX`)Hꤦۓ`:\z2$/DK9lmԵS+*lY68׀[ % Ɋu&h"!h7z39}|Kr~z3oT s`"cPxϊ8*%kFEFu?KT]DB>Hf3x9nyTegE&R 8;\(&+DU\BĶ KPz1}H%tԆf7w`/IQQvr_q9cʡ(o}DP(ܫ~S!Z'-a3n{Î֍0C`ko%_M65(DA{b`)3TEH )8F >2udC4=@KE}'1XZ ->udq7Ѧg2i\1{Ȓ'r∵61Tmb7Y}T4)DNCN nr{b̾}%6sЊrYD(kD8ɇ`#-c. 
֊b-#,/TwbWKUG= 9ĖB6 @{mdLF_d׽@LBV[8H!m6s6#i5~2nRn`BEz*H,_t.chQ]x2%k@Ry(BsN cDpG# "GL`u5$o9F\jl=T wSh` >J]Mf8~es&Ʊ'<z&Oϻ;yzrA$A+K+$Tj :č[ō[ǍōJ#NSZ9Z H;*,..Ab9lQ]k 1U]5/DŽ5.$w9SKJvwWT;rmAͮkTdldl"7s}}Do>B*jBO5^odg>sp6~^pKaQ=Ӻx^ 6;#lMp ="S­d0 qS-/!`kWmt>^-En{a ing˜BqR 8ۧHy6Y~皍v1{$JSZH(і8~|1XjܸPYB2s,͚VYp_\Zk4K3u2l`WQmA+@IXv5g ɡap \]W1c}qzW 5i oUcC^Lb&XCI!Pkr;JE%Fb#{tYK2ky.omc{]M9p~5AlH<}3V|uc Eq~zA: )LA-8+*4Uun AWĩпu@ -O.xa*bѡ3}WgS d_)VMT56NNh0Bqhxk "7"4;0 e[#j戩ZOk"_X,N|by޵q$20P~?t{{#8d?Ę/s(aVϐCJ"p#Qsꎔֲ2roSG5TD𤌑 xNTQ"^|ֆr*1*@p%Q*%g@a!8էB<ٗ'8mȱfʏCv(g:'u_3XqP=yD p7klD0gU &pWǚ3fX]GuGrb98:)c]ZF8CY=3'̋_y;H ;=r-n ^!r8@X89!ko]qÇy" lŏh)XF 3|| Ƕ؍nq,8 VKd̻ȔDq- &п[뻬`#쬣NmE%d|D,6!IBgqz -1K =Dey^$l˂bVi|{PJpH: '3=Bq`mD&574k]Ķ/F.,FgV NZۓS23=xd)ar#FEs bGuҠcDnTiC[a (჏$wEW9W f"N@yOT) DzD9u ^PoVreoinӀLPF^ H&~mmn(n A=@Qgpk,,Z L[$;تoיbr brǜTRR1LT3b\tYf#BN)v_oZUA2'hRu]cgSL g03/=,` 1 o\]h$􋫵-\ggnh2 Ƅljs $N^c?ӯ|b h@J`jMrps;: qPOOV0!.(<}`@dk0g0'ˀ2"S|>Pجk 8;ZlN,wZ1fi?k nStb w.j0͚$7XW0ϛr篝+S';Z8$L4"2x`=-6/U-LT̜MnW\-c qA8*r饳{|cǰ`ooף [:8|qͱE+"55 d>Qzָju:^źZ!|ueM[ p]MPd ~Qn''\qtd:AscUW8?Lǜu?SYx2N$EuiFTUR;9z\*zĽ*'"98GhzgxzHI=0[}djhj(8 lZk)d穩jj^'ς EwSYL![,A&>Xm7rEU.R'Dﵨ*΢SϨEJVҥI̩pIRP0ohm5yn-Rሉ }zܗ/:SCSK}b@w>"GS(gy/5Jh q[I%0. ^F]甋7[iN{c8Z%YKm^G% @i>|T)1y"AEM=38ˮ5VDnij嶇gM2L3C^2}:oLQH}a-`.9 FQd4$'}3znW|g,|><7_.ACW3iZ%Xۛ5^|{,ୖ,bD< GKR1}~_3='PO0Yvr1eD0Ӫ;z;ɒAtv vw:d@6u}9Z c)l=9N:$t}qQ2A59{$. ބ ]r›R1flWy?cccUf.J {m>깍޶|e/dZݭf/s9ԭ~Q_lz6oKr;9 ~208~ur3w:OaE p># #F`9|pJq?Ygֿ˾t˼m)CE_s^F)Hqu/!Pz")Nlksa!e#}*kفySH *ID' ]&L#I=*swMrez-ѢS+Y|J"2KC"2@tljnszGB!'S/c-˖kHp˽hv}5܃׭vHT^*Kٮ=YzмD V&rODi\䔖GQ:jS3405"R&^:J>cm}E9gx"s =e\AS@" 𖵦`RBF$OZ"*k"۸%lA; sE% 3셶)w7fSG~E`P"}X{,:DFY ҉@0֕MVZ n 9ЕEd(&/%ceҎ Sk[2{&ws܇|+Il1ò]>L&*e1._"%^&nx`'J<E4gLKI yBV*bIX 1gE4io+)Wp&tt1*UGmM@)z\ 2XN%Q~,{FSY(ɹ!H'*Ƥh# ӢQ=<(lzCiꓤ9>J|HxW% H5x.# BH@|r%80*f6/dW.{u"س*wuj=HHA"F)1d-/ҙwW1|%2h rUѤg: ?> Q~61@"V)Q#nE&9bR ,"\緉vՏLP2âa= >Z[&ZVzaRpd|bZ/fܭfuzV7Z>/NBH<wiTaD."L"EO?9$rD.E"w6H:PXE,">HYQ\;A??ٻ6r$W4sN/ŗawgXaw b1ƶLvq-*GZIv$V=UEVu)r6xE6*k8푍 &ɤ%^Drn7;ar FRI,k#b=i- gg0Z8mBU0*,c sULP{9Ƒs,qۍ&";pX[mRl\=N6!tRrmu`:U3:a, T8@ݝ!{Zڿӻ!̳5rb1䓏БV#lef+.#.[3H_NJ{lJف&&ED.kBUUF9YФ3rh+5)-J9"8uTSF[SE9fjΖf9QsϦHHCFj+r\1/*CKU3$X4Ka[ iV՝ Xq{`XtM6X;BL`cRwTjC%h"e9i%LmeK! ^緁u. !.2dJlPq5 x6e#2 mƨu֎d4੸La4-Nsl!*C@*I4ێK +bY,CM%:NޭTyjCk95ޡH%tUu%"BT%*p!A.LÁ,2"i PN6Q &,`Xh9'&<UȸΊ!?biw,myU~aueվTsfG+J rjG.^}?-[]5qOcX"'?yi.⵷wz>m{7=?nx3mYD 3^mV3nѮkI>mϣ4+/iHt򋬗_neupNrMZ;k|?=YZ쀭>Ο_VԬ ;4gx\߸X15 ж?F-ox;sW.%YL/.Z-JStnh8A]k,MIM&/N.+W 8zV*Z߿|>0eMo*}љ?KN/~/Z&iX :Ϻ'ߊMk03(t-C- #4A{eY#:)0.-CK 2뜜:勡  BrFjcG &i漸jEꮦ`ظgzvG3gՏãg}m{mlS|_Tcj(v^n1خ=e=ݡV]#޸Gkes=NJK- !s6; BE6]}uƯS痧kg'|g%M/OK/3ONM—ɧ>,侦>/kWo rN>8˹Os{}f\=˕Έ4Wݰm?}K;=D.Vfe3a h(4{mde\u;^r-9خđlGKV-1ђA~%&6&U j*YSr!Q`Hu19C` d=BmOko{;IV|hOćzxۚn+oч⋯3TmͥH]3_Fs0dG;T2dR "XX#Tr@bq y7y̾md:FDGsSy1E~4%^Wj k䡲瘝Fa7Uj1ڢU%D8:ku5ɘ&F j#3kxlF4E+N]NW;뻓!vL:b5.bbB ֍\e>v>}Њ쒇g{6|fH]&VG1TkPL)Bdùֵ H-2q6xrSQ e2Z-f6d92uT)TL10h]ш"{תឲ=GDcS\|(&zV/ٮ`ήt;ZMe[|nZvqQ{V4YsCJꝥ%c"pUѦjcuu$CX|,j~=?#!h11+T"=UMp&]*0[F?q4i# YZq.VK䛟yНU޽$Mʒ0QDg׉\dZso|^gOޛ1_s~xhYHH5G?V<; _utqc0_]P2^߾5 |u&B7*vbruk-޽˞y~sw5-Z+[ӻQh4b15ovF>}{ҪДe}vUJ ue!LΗ tmh1UApj̝5@xpXu!T:Z;)$@ d򺶃9B6gI.yW> Slb &F;1fXMP{?_>w^Mx`=Z̊e071XmT \YC,@>qF]63Մ%+8ۍgg_M z%3'ba5yϮ;pNC ژre%6sYVQ)^/BPiJUA29R-j ,T!]d48P ~ x2ph=Ċ.04Ѣ9\ձ׷e['6Sj>h9T2.q,Z%cE6>buՁdCb|1QEᣠ'6HYlXyՔIh}.X4*5RɂU3r<\ b3wͶL M@1$*G521>Fg XMuqJᭈp]c5ofQ#q~"2NS? .f(@ihIqugYtdV kgH˸_~?fF7/yts òu\C&TX|Dr F+Sv)0) v5$1)G3ZwnTD4h_)+mkGE4c.Ud1@0 t\\4< .\c:cxXv>G%AvtH~UY{{u>ԭ5;;Vup7  ,520@[>>? 
4=WN3~4!WDd?Lk/l ̓qǍWIYTM/^5n9.@i l?.&&WQJ%) ,ٸa (U%V*d2e]AEE8e Qg)l}UJ0p`a6ܣ*29H2`۶XXY ZJʹtt-U9YnlLp6tխ-n)&թ"ʸVvx` v;B7h]w.DBakXdNŬt.eV5;JFmUaT#4U!Y$FKLŕyώFx`cbyZ2:2uv]..Nri"Vdc0|T6AϐX ~@g Nk`hrՅ% _BrD>4l`ܵgde+DŹFפ+ <향V8vsCWvoDžW>:[6Cɋ$fV =FcJc%H̢oUmIňff 5< T(!ې7#C㥒a t]\wx(D_nAZ9}x&RZ rcs1-Nɚ sNDjDm!IhHնfQM6dq-LeoA͒즨wg  1h0H! iCW6XIt`yЛ, =t4;2 yi:Gwߔ3n3LY Sh$MSJ$7Ѥ֤1T"èEA砜@j0(G#æK>aRQlFlUKM bAVS@FYҭI)\(SآGB f(>] ~zsΞY;Ø'=oKGꐟU@ _4jCrpϧ-:L׾y;͔^{?/uӅ3vsېXۇSVO1oMېysYjV'x]Y(UȡIJxbt\Nkxj}atz=hg;5;a0sx-5à=b~ӎq[(sM-ʹ|ir)H'gH0& Z E% $ETppt_\ƾ#?7P,|F/qsKf.hsPX)+c ʪ5"dȏ)#qxq}w1a*%Qch+ioN:SLU"Hot,*&,9M`H]<|{7Y#!^1)?''mYmyBrR`6C~N7r uz^̴+a3WfwXt8.y Jُi[ǔzxs49kpzysXMm78X!@`_ww`kb~} ,_./IZ`߆DR&[1>+\,ne?1 wEϋγDᛙ.PЊ$~*qelU +M둎U,4nQ\o̻LYH1FHx5:!T iLbѦXVUz[#{cHݦ_Nf]Z \"BY(5"(0DIWHB<ɝȌ%[>PŊT'78ۋ쭗A-"%[ dM)*)RnUΖ\v*hօ\P{Dݽ9&Ui܀&DyGDH3{e71 g)cLV8|f.UHV^xY(tΆ!@a0[`_{rG8UIGĔ7ǒb`VR-a-FPrR[(;kqS3!Ym2=7qW'[ⷳ,Ls͇~;Gɏ K6t"q:j6#wD¹_ 9ے}YŦ8? v׀ <Qqy)_itwˊ<:4Gk%/ӗ^c-YA> L,Н/!^S[jNOjI?i`ttve.갊_{ ˬell8M#e j5 $ Q\7"؊l?Єš+On*|`Iz?? kS Lc)VbGg,VVɭx%k*G r1!KJ1N2+fDE1^VԧD-nѨEE+}ei$bEsJZcFB5~/\M*YkkU&ǵ~Z?ìVUI+MId(@$< $z+c쵪v!个/V68sR'3IDe\SvJ$xg_\MO.<|_?!Á~&ۍ2b ֑Bv\Oڻ:鯶ü0AF΃]bHjw!W N?HmM^b(6L:U/ֽ,"k47DL<f>0x- oVhDfy-"F Qւ(j;mz5Fv{F)Zj%$X}67M#|%y((Ӳ߷x'15zo}'!H /E`2i PjzEɯqs!$E-NjѢ*1*9P- (,6$o[wxUSZyKSvS_1r;`g yqߞ} EM-a.C;nyi^c-Xa>Иj:GRڣI 4ըZbBc_ c#c;˦Fld7;b7^Iݼ<ۆ cG.s [04L/g{ {ݢN`$iB!bcHJ޵^hx&{6A=c$솃heۃeF֯/ [#ߕ 4!+ "kzT@AK729>`,{э*&*S)K( E4RRYC^X@ qsjϣ"j?{qq ȎcշfGoJIJjֽ:>r$/ai-`Dn-f]%~`{b-% 6e2RECLQP+x;}:O܌'y7Ȗ3_B@BQhD9%#J9"TEr9E;޶r$!Oa[,vv`}ޠ-)\~dIv$[9bKlHXYRzPnZ|~uLsLgC{f3NlL*F)n[_'Q(^8by"cQ?E)G޻9du9v̿ʫg4/eGGloede5:/\/u O:0!`͝kd UaI˨c_?Dv{jݍWquۂM#`D ^"JC_LY0E[R%#e9 A,{D`cX^CEH deEAD "a"Bz/sIɹBDQ%쐷5Q5N=Uk(f٥Em쓵CPRxl0R#YmP(_mu\ _@ &[hȝ84Zg|~(ٓrԬF5F17`,޶iĿ-cz=ZFu f֕{)oOrVzYV E@v=g aaIMѥ^!_˅_Ǐl\@:/͗6ò[e~ m1_i~vߩqUʣ'Al&m40a( vG܇Z2_Ҹ)y2Jx2`sh#8ﬗ:>^}')C/@ϺrPj˕͞G XrJ%pD&= w g'.c:s"![dRւ2EAԨ,w*1w֜ǃk&Zi%Ot]H niQ}fȋ=^A>2iȒvPEF48qKyI=sMY:,7]5(9 89蓳!1Z嵏"*kAyOmDqSN8MBOt; "ӓqH%WO)ҏ˵4vb9?wq Y6E2b 09=茪{R)W㾽֜#9TFjѢ35qkK!f/QIQ^݆Uգ̑P(341Q/su+'iPr)2-e^Zː8*m{x8^-eF\MYCD28g%\i&,%+]wQ1SBȖGf-[?L_.e;O,XSc/}z1#Fmdh4}kz ~,&W.Kkל'e׿ ꔬ QaLkm\ c1;x=H%?A҇{7G:KSA 9 vQ;%͌(t'CǓ, :Vq둧Kq{"{˥ng|4cT ZcsDL:v8@*aCiD -Շ'vБװ{ XC4"bHNZvͱ;Gwx c![1ju͒k 5~.`<}N&BKv4*݆&Y@m<Ƽo2/χ[>XZwmz:-4}xן1XQ#& z,c3SjOѰMgo'u|=]5u2ߐ/{.UtP\_[# 3f!;Qk2ޭ$ց8:#%h۴7V4WY@ )h-8GtBlAO (Y댇dsQG>\0źguMHF+6l5@ɋpzXS"0sMƞ{yP[VN?ChV .= fK̹}r[ ezA-|`t vکzgl2dr4Zu^b3,}Ylp^Q8Xy+œpb,i[WI#yѭC{^^K_e^劊~/khK+57mfm8P3ɯTGz)YthvkK̖E[m̎'hY dZ6'2k8ӂF޵`!H;y+9vOO|H?,1j!Ff>"2]eza1Atz6uz}ZFbwu.H5,* XBۭ~[%}Vb 5h Lg]GJfl?ljw21Ql,2砸hdЈcmW H:.K8wAq0V˝O5Zg:ȳ!}cH͕=osMzTIQ[ŚCo_4T6m_)^ l`7p-EK|uLY]o^8mףihX&#vZMsϚmYi:g}~JcJL rˉyrV3-3ێ3ۗ8}*mlǎ9&c!1zXhI%K݃9]+JYr[lߺk$#9L2'o*~MF7&#Дqrؕᬖ_d/8~#R\x=i?@ Igy?4|Yv[h s؟˳jQiYi-8H}xoR*N5bQqL.e ( ~B~ w-~i-ӪRd7W)cI+*a q4FM޷ЏӶ"};7]=-Y1Wj<@Kɒ,ֿoJOe'^a7y,iΆUWn|Ģ.xP߲ WV\/R}__Z[W)&};wC:Nmfpλxsm+ K ]r|NvÓnaZ CӦ2o30+a,__ܗb:n{ʱ]&wd'␥/~ۤ<)LJ"l%0to飒,| ]֮<]rk l1zA(}sQ}NkA7_|;cK7Y\m\I^#W)oAy̹$N_Cը3$m mzWyy{vy^s9`mrX0*SuK&fR''?&e8+^)/"eD,=æ T8%􎾾VQ ܢ.:(!(k%E9orSorS]ϬI/kCW$to,M>tJߝ||5IF[+/-esR ~{wn <s~G|Ώi~Q7u/~~;Ϸ2/G(5,8n=3)d+j jJ1r u-Z.V|]N7Qw]YSN,F, hf6'¾lj❃n+-76yF܄ޏf9kƤ-ge83γ3=e=p=[ւZL{lJ%$pVKck{ cߑ`3ǁ$0'HyW/v̏ &;%sϝ>||o&?0csB126׳7mp?}}qه<xv7oP_?Ds<ٛm1 oio&sQ3vG[Th%&b?=Ru.4/ $o v!>`7>w_"/ίu9F>Bد?:! 
V=0G JVcqQ^[7ϳ z70}qS_~nۻ H#yfR^6vG7/_˥Gif1Rז|d!^ EoHyNvzG4|uo-(%:WggʱtMW0>w7%kx :؞s>xg/_ }zCj51hH~]q4f!msτR`άa{WGu2bb$!Š &=K+TVBq Rg1<︑$j3YZHxst[[+މim\#[#h/>vh;!j3LBὯyZ"r1EG3\$RU!Jbv (Wa=&5Y(.Nd&  ը,du{"{\ī=cFO`O!D'c"0 [+_rBUi$Uk]$d^Yb|#b-뭛W&$`+3Q XQ~ՄJA `i.(2[9bpZS`1l)=Cd ٨0PuSQ,-I!_xHnFdOcg!1-A Srݡ@NnZ K{cWV9ID:%RFs8%j!׍ tWH H+JMhHPAj T>%@P5_(ucDSriOc]ΓN0QkasOz1*D%3)AA0#lA,<؝7%цi HbՑ |4lhJZSQQ"ra5xv}: 6Ү FS:ȎخfLQc, ~W4p3ZI-IՒqFLw /afZtW4@d6ɇhA|<[Ēiei*At2˹Wc`בm0j |WAb9'HSg吒fGyViKq_4h2fޒQjAs#րcWaTtOFjq 6q2Jv)ǣ,"( Qk"MY\Ǡ ![M@jةH]<VB"Վn2{T [1BBK2i2X*4hxIɈ pP|l=ShzQ0Ac'`s5|]r=#J+̳ t8k7sVF `=WTGucv/1*/7lg r;0 CDub}6_@H0;% NH(ʹ% "1À:iHx %y5q="D1v=6TcBlrE E?S-U 2A\ar $H-CFUJPJlyh|}'$y pPǐāBԃ9Q Lg%<3ѷnl F:!AB|cC! :[Ȃ]:+iW@WG2uc *j'FIU t#Py'"L2 +ޒs ,h!s:Y<g+Gٺ ׻+tie {HVnTBŚ8Y-B z&}l-DKޢ 5Xwa\?A`'Pԋ,A:X ;DG匸9r -/>\) W7zN\s3.$ ĘCU.JlwTX[F4)Zp ޣb ;˜p\:!qb=@MrnWoUhs 䙂;@'DBkn&q@ ]l 9h|jC4 $3Xa>F21{ CDJq4nށG#w%t~q.B3'> {\5L ,:!!`"Ȫ] C.&M|%f|:~ >s+r(ijҔQC;5;!a 4ZOLLx:sS Up)ez0x4v;;lX}oK&|j]/.ggwfm%+0 fdjɖd;O5IPFydN>tUw}nA,@ ,tk;~o}L@`r8I%Vh *ފ4ܒqWȈAlRl;{-TDaa )_WaWFM8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"{pX8, =޾{Lt  d rX+"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,ⰾeֽ" ]1N \]?<%1Va9VSe"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,Ⰸ"8,ⰾmkⰬN˶#|^ WaUzF_rcZBqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXaEqXa]'6ims K 8TJ.tz9=3]EN5=Bgt4\pEG.g80joU|ܬB*k_֊jj/濹ۦe?|RSf7lG{fO3(Ѕ 4y;G7L24}\~Ͳx8o~ Q`e_L=K8~K%}Z^M`;J8ūワK7xGO)sFjv3l8QK|ЭN_|L'PC՞b{'нav94Lr<<8Ztv)hߛ^؛IsCoִLWkLLY1, u5.k +IP0y(73h> &8sscE*J?- ίfkXh1}X 3` Ӥ`4yͭ%5w890%m60$ӊobl]< ~ܼ2-k8Q}0z|gt[»„e駣3| nТeI).W XIZQgZRVA. elrp' ygs9l<ڗUq5iZpU脳/R<iV \|g~a9 q!^VٱnSFEz'Bw (n77+~ZBghEAWk vZbaWZvQGc iRWiwV{L=3`S=%dKTKdʙ#卦!WE Pf =$Y3g&{ܑ^`׋;\ 3iOb~jM.bǖ҇B+aPt} aSy;ڵa'c0iq|եD4 YtfEKP pVzNv TiۛCyU?uyi! 1BN֘ faR߂7.7-(Vv`2Yh2\\|0=Kڒ1Zy1ҷ^e(e^_Rk "ϠӇJ5*5"FA'GZ9=Za=S3'H?wf Wu;X*`Ԧ*c#'\A yrk xѻ+e[ }a<GUw ήSS=<0դ.bα-)E Yo;{6a&8q \SvN5J`hE|Cu<6%k BV{T6JI%g4|{ҥԎRSpyz.eRkA,-TI΍[3jjJ|w.dhzju_I1p<ٽ/Njq=W7@=2(20깺ꕚu Uj NzbY`9CJ0sN) ·gQ/뒾ey<7d(cW`ʶMlm-xBr]ԍh~:Fr1V ZWhXhK=UJѲeiHՈiAkaڎZ/R~+o=$l֠p_A 7&8%e wel4Pϑ,t!2f>xJ Mk*;k6Y@U6 "缀 Tt Wq kpz;L獁@ cR0 5se%q2Ѫ̀\ b@1.3!+vrw6ۭr[TKŠNT{ox`vCRb˼ļ$[j\|Һ.c68wA<3J ٪ Z2C&j2F/?ZjDNQE*ٴF j1\KH>`'ohM:X[19ʠۉI8l}-#3eA@ecDH(884X/5Y :m(=Hbe>rb0ggk󷼝3_Qy{ʼ2꨼[މy+ka2},7߳{Gk>bxMGo^{_uŹ]]AZ5.먳3`8bcwy)-O:ie]Exu~5}Xa톧+9;}ZI)ɣѐX&ܢ6!+a6i3?[~x CWieTQ+>}mul&2 whTϽJ='y O:LUpzmJjP#uG:[B\R̸wSkj8j j1%; >|}vb6CFڞQq( rUw6kbL{P!,9£|=-˲CE:seouəȲcƲ6X0G @VRBrJLcĪȩKKxXυУk!e_Fb4AShB:P:PEgp5 z!e+Lz]yWj-; ܮ2ªr 2‚S} zÄ}MvܴxfbɆ(du;*AkZkR U%\F:+fPޫ \[kB*ܱ~]ˆռ.c&ZT6ӽ˳f7%7:;3jRW?7/???)o\&TG{eZf?2E^@iM5rVV?ikܻy(t+w?螏גz zqG߆KwG|t"LWޡ F0aIjofKK]}fZ,/=wEޚ5Ertdʯ/+2*Z}erQZ rupJĠ%ĿM+m#IүR,ntG䥁;AhdFFZK,kY(&uXe]]6 udEdQ{"PhIfc1a kcpv1| 9C^̛\uguZ<]n9MGc/b^<%-h/h|Hk 'GX 5g4n\5~<;|ci͓;~o(~6O_Z_p?jd oA ]^-v-ly?r< ߸xߗWס4[߻?]/iDvl+4*-ƣ tg{&1Uˀ|o+,K W[hd6j`_*'<>MN篥ՆGXfiDx|6"_jɈGȖh'Kv#Pv#v#`v\F/Ѧe,Z HJf}QJ,1yv'KBRL2b"K6D(`SNJRcLȞU޺s{U +zBJ7+; # [ E߯LzcQB`&c"׶9:Hj!+y!{͇O훖?[.杦J^DUؠV+| |q:8b&Mti G!mJdwAkr1 M2rM(HXd %%t= )@@sר&(%$S\5a S JZf);")I^*kAYrNqo@8B=.3 )0Ϫ<ψuMQigX_lg]6;=?OjV/7_+gN+\q>S`RB?6qt幜,R?,vAJ/[]^4;<_r`('VAz2, ]NQ7Sإxkr(/ieY>-Yl |Wc}IDy޼[Y+QX_ }f;5rg[ISsFϔ16j-36t` CZIvU*==cp~'堄I d!%-uJ 8S!LT8dΏͽt[w`X @ox 2Sc.BֆmIFʥ 2&}"CE:o)H ~G:~ƥR6Iـ^DhSԈ!V %: z ^D T4!*R>HIԊ:L.vdu@wL4$k11QQJ vYuq}@ %vk?4Kyf?)>2PMͫC٬igǓkf5EB+n4>W?4k-F[_>׼ǫړWW/g•b{29zΗCmMLzɿ)#W3B~/3&`Teg1OX'qK_/寬|_(M~sa.r?yau/R;\k>>ѬfJ11&g Ŭ^ ~lflkٸY 6xV_&SFɸ01ӣMNG f\VǠW_);}o4bjh ;)Xeg`{DqHZU}HeJRwCzffW="Auq"DzlЇj@"nq,X϶#LF{JYh &\M݊yO(6Rj u aw@;׀J;BvSa+ b&;Jv>\(JdI(ƂѩB $d̃}0ܛO)>nOT<\픵"69)GE{ݵ YYt$DK18)+;h B IeWXt:4aͩ@Z 
JDL"(=D=d/j;i) O\ g0+ FiAW_,9>!r|: "Vq>ͳ A-wrt|ya 0Fq $`3-n#ijab*3.GxQ ͵׈{@- Mcqr|||_ d2 jgxm,%X# Cl<`rf-4(?|kΤխoG&J ߆] 1hQ p}jiXȥ*z˷2]$h /Ś'"*E!3\)!ZK U j'e鵣X=֝[LSd:|1'TN]T ,K,RASN9[従\?{ٍgK/aA1~XԌGA{uԚnZ}9XȪVŖ QATG&`\!A4Z )P W)ɉfmT`LV1栋'_rz +Bte\ αԞfv cȯAMΛn{lVjk(xA!6 0;G.XG(j FKAc}M2cnJ2B!R 5,(A*]TQ6J`& 'b7via! F>T5!FmVI T$ #)8XN2=I˾/J˥KLؔ;= Kc)b=ղż{o럾hm'&},pb/{ԽbtRKh.szEYk4?m!gpuծsaqvkg]W +=X<B {MeMz&&irΗ_olw|r9-@zzxqDW8P]wy:v&;ONZloqy&b_ʫ׷=ߘze[U߭`}ϓӿ=5ҵ & >(]8O{#m:$8[oq uvL,cK,#)߄7yU1%(F R)AG&bnÆl  .X&6ƽ\{alwȦz dc'Z-7\>,`Ml6^K@2@Z%8 M K79 ^M|x;|3i|>H5S50i>CֱJy<.u$AS= N%JtnpnSʌ./5ՆLkANQ 5p!9X;FVA{y'9lp|˛)1p 8|>h/b@Dܐ[/cX+o9o|@o./D$JPMs&FWڶH}-܊ jFc5_푙xì&%&ew1NLbQ\3@KF`*C9K$WyNAxzmGz6~eUW'3l"R%gjh q)l1:k l]tzjg'_v6+7'Ɂw:.6_Jb`XZM6L9M(E @}vA7Ѽ8.tmpj0OOy]<%ʭ1P҂)p@6 ^U)0Rj1K^%rBB.2 />!kCQc9R"C&?x˹L}rb<yژ&p%4`a {7 rp"䍝Ԅ՘ٟî Б] J " YׅGC9Scd7 1s=3W`a'3-X@:1s0}>1y_ 5C 5XՖbp:;x+4kTq`kL8Jz0L#*F6K& Sۿm bqp8}p^(R2H QRj M*=9'ޔئ:R5\4_[s"UN-BX#{eQCA-=s'_ ] /הC &A1S1ΒA6,YTRsB- z»gZfuּEMyK>5vKy&_>)Ng=`%2`7(΂fhs#e˟f'$zn^^#q;Zn^k!.ߏWPh\rb wR՜RXwCA ) %΅26z(lyG 6ٽ!.7/u癝>U:ěBK˒-|{MeKM.?CĠ|i8=8t|6?sQ͔t6@%#oYڥ(ǬE}^U9prhprh%jZ2cjD| ހ.u>W4b/{jqdƭC'5;1q"Pt'vZ+:[I5C>{v;ߞ#fhx@yV5z{Ski&WI K.޵Dr,Қ=i9hv-bY+:+a0 4T5=*˖Be_Dƥx[ hwT N8pѫ4TZofXL%'$@Zvj;z\晠Þoe!)?d JV2eYK0ՀG(l iޥ̊fZ~?bprPɨ+ J@t*$h1ɺ;6l}^ v;/KFG.y.MD~ o ?l+d#(!Vò_1P4FYE)ZǮcua ȗOF;02 ;,wcY3N0Qq~5JCOle)Nr@_eYᝏ,!x#yM0_B^mh1X%sw$Cfѷ*=wR1"BlYG*v]H#C㭒a r_y\;Tbo|ч젫V_0ií#xJeSxprnn[Rsn0Xyڷ'CpYjVOPۦC֓L.\wG&:,K4RvjvT=`:Tͳ-maPKKjsFzO69OgVfZO׃V r DWA+dAt N9[kw M?Vʷ߿݁~_i`-$)$UJkmHIBv>eWٛj1 Ǚ~ש&hnJdՌԖ,i**mfV-'Qld3"nıYQr#ěr#߃Vco%//6X)go$^~|{Oz7ɾxueY37Ĭ`T$MTXqt==pAi;vP= b*[쉹0uA98st2*{hpLec 5%teѡuYU°F̾dX=4a@.&LD5jm%IbJ鍎EE_ׄE3IlKqw|.A6kd0{b08g0yZ }w[5&Ji;muʑg%\ܜ'itd/ bb*vO&hS,N+ĪS}܁s%%O {M&h$!@YWvǿM.NYC[eSst(),Vu·$Gr$hM*./",e*8Y *O䃖)6쪤dJu :[r٩[r #zBu71an 4=e(P~RƈpZ9&L *T $@+/,:gC 80.C`##bʛcIfR]0G+<ٰeѢ54ܾʎ/xԌ,cO,ò#wPx\]TN@!jY dQ@A( ֐i:>K,n86jwdB>YtH誒CY6_.W|;7;<6u!ֱ^<౅ )Ȟ(!qۅ'_ihH+W q0F" * `}'(h-*1HdF2rdI`.#tI@KtJ*һ(Mdu t*Ŕd d xS["i%=iɓ[huJGL6ooF/bku1MO۶0ۣrsK 7g-9i0j>N:{u{5O^ ۯw,}N'(I'Ⲋx2͢'sy\!落5M| ?~5nt\M.?oyߓ.qq7=p5V`ו9a}Oʿs!/#ТDWc,I}Xcԧ O[χעKwPnk]^ݺB X> 7!>+@H/ Ph%lWcX%Լ8$\!}Yj%D .ԨEUbT^;cd)9lʥx#[  eGnmF9MKD^oK>GذQthNT.CZxMYC/ec#m9T`zi@cj)hiTH1k&,TjI Oo4PiE !HU}Gmnn40|rSy8#>Oy: Q$ e 1"A[\,Vt:e$ALfv 9p:EED EkO@TKӌ!y("xz˵cX TjpC_ g>~ 3r=q`h'A"w|IZyGW\U1X+ՋռYC-ǙsQ}3Z/&TBMZTR QP@6GT .e~R*kK" cS!}AX=g'>/٠ ID̖D$xwu#>qeJl(I6LI)dgMn#+8vn;RHeKq>j*ѱ[Y5{b-% 6e2*r`JjZͳ.nܐmyHfwY#5m9hdHʿ\Nfa埫mOaħ2>Y~)s&mr9-CE, :Kaee>! - n xXwGM{@j9Swn^7,f=MvuN[ɢ+mM-D~)G"xOi{oe~}:Ufx׋\Nh{?ss{=5~dnYO* ufU^IObzuJX]O*? /Np'[ҰJ= . 
U?v\Cj>JU)gU|ɂ @b2H#m̶N0{ *ܜ4`63;{"i/l"=M@ݦ PX^[jz m+ɴ.k!vMtvIO㷞ߡe[k!EYx/KZBQ=)׮[rj)pYu D%9M-ie,⼖Wb;rMn"t6keƾU>֏"+ ,rF6ޡBRQP5W\LTUwS>rw ]zzCY0IkzZ f\mקׂCp~C]&nk=z̼oܗEQyI(WU]O=kfEc Y}+M:{iBikzWpޅش"+ cZ;s;Dv,@0(lbv[+7vSr'U֊x0ɦ=As 'u E%DkN&rzEh׽4q6gzJW?%ioU]m v`2H&/31}I %2>$ER2)Q>=~HCUN)B;&%H ˁNnas}oُ0rc@PsLdјRdFy<4|%& )}ܭͣ[_X=U|?vYiqpszҦM*Y@#,j^y"T݄gZUdkJ~0\֝6!FSK 5dX:1Ŕs>$iz򐏰{5RtW< AD+-K+R:Y2tjxE9<s#8Um1x=8VI=rvNH]-K-H0ATBb[dO!~ϝ/g_V׼sK;,B˳!?~Uh)LZݶ jjV ]TK`o%R}8G=MHI{>.ƊPGS4 䡖d V%yzSpgV8`Z4=WggH'@Ec,IAya%RTZ 誱Tؒfi^K<9o~_!mH~͢9G*G{)ڧ7fH[G+'Ir(caU[.=Ewx+u1A)kDx+@ R#)}Ud(t,sGNv3C/x/3lnR^dހ +$e;yy6tlBMf+QX}ju!j[B 4UY^ycZ~b2c^ۖn|(@W,dBIؠGٟ9;s=q7;Zt01Jn0-%5hz^ޥ:n>-WGoGˀB huXzGM:.9Q8 "V=vPUnUf+:ꪫbR_UPF1CХ}e"Y (ElhC]*bg=Y߉ց*(Aa1"Jsƺ?l.0|e.Zwb~H)Dqz|N!ƤPYtxj?#z!$Jp 18O&Sժj2 CYJZc`Rjm;ޚ#\=<%MLѯi|irnR;+95 ڿ]MӭiAwŸgĜt<׳i7 T;y؝"*j0X`\7ц~|N܏mme+#I\7ŰP>LƣC>zcS,SWWd [I 9o:7;K;21Ywz~bQvv\6zҵhJ[w,41= xxm9m"8Q֌ցmw2dQlg=ڠ$, Y5"ÓÓ[RrAN/KRϯ3Gܳn뿜Z fzӣ[|9668ϯx9^ ]줆/#8/DȉpmcoC2?#\߽ç2 4S`R>* j%+Y\sz*#Sq@蕵ų?dL*[kH&\)QQ@ T{=ps7oH,2GIҪtʠ:y0.ﱙ_@3@X>NFU ۊ1 w}P羅# E#kKU$;@ilo rk8D">Sx|Dm;'4a(\gŠ1pvы?Du Uc Eo2_c$fe.' 1:b&Ѓc@^#YZ>TDr2IJZu$1tn6`g C:sBN;0sbe8jXSٶ #;BCYLoGғ d7l@J-ruą4zQd5U+ʀ%>X}d0RZj4 i( e~5YLܮj@! zv'mQٕp\J\@V3 Lu+%pDc?G4|z8N2wڙ-^;/H.H09KIi/mK&+%XFADʾz[28(MʎU׿c@gdB3( \%KU)[JCmG^ݰE"+ bsÀ2=OLNm@Nꄹ%a˭2s"D(Y)W6V9 ,J  KQKɐJFRɐ5Њ!ƥTJ)fMwqګJVq@"_7/OK<j*$4>"C2E-- C#\R&5or:UíI.d֨jB@JQKV-+h4Ac*z`Sp|-5 )EDX}Kf`7 7Ҙա!X`&U+/XI( $4 m*\Us Kݲ:WFY ioۣOO׏'rXUv7 ρS񺽚\̾$+K!^Oh5RtJ2HUrECJBhD-Cz1<(@ L&WguT %ǐ(5#.T+g,eab՞+~"6ߍzLgƜ(~jYaIU eʦ!#g!Q|"]\5n.*zLPp&aA1y-@CIu!UxL7a3(>~ md}܍J r !+bZN1'S De4g[ZH)&\)Uh!&^}(>D eڿL: 0JDhWfe+8CTMoFmZX<ŴZ=+GMį.[v_fTss9¨n{3-#~jR̈́_L /eEŭFsa}Żrw;BIqe ݿOoOA,6GwUEk:ȁz%ؿ3#7w\{Y^/b  r?1wXx^"M4 Ydo8#YD#CU+T1klǵmYpX}lOVS|NJk5$]lUr0 !phrH$:p;>si~ ȅJ/G"ZEGp';X{7H`Օ =—Go-=)%ΐ5i$H~,Q&M XwUJ1A-AT(9d %.n(b*8;}mUlT!}1D(z0ߣv >DF7eԙL烍$4V=DKl¿5z4,o;9jws ; zhPݠE {l^)MʶnrK4/U̺y[D.?~?m]~7*o]7>s8ﳳJ?oFy8o߾ ;|qo|A0?۩j]¢f+F=1o?}w?ȝ?0j z4?G>qcb"*G7ⷷg9}vCpA:{ ʦNP4uF<ͩώh4d*ܦ}|vqgpƍ㫓p$ٝ׻GȾ zE:=K20w1Ի%r*Bdtz^n!% Ě`2 \aIԫ_"#Iͻƞet8 j&rt9+\P)౰VRjU7;\JmYL: )P%v] ÝC!c`XנL""2\ ^T$jVK+匥V0:G*L|TRV9: }CWWA$T!zFa_Nһ%8zdccJtbyDݒrr|Ch!4cY*%Z4ds#|򥟊D0oo=N%! 
P$>~.fbIuL謫}]X$/}jwVz)ԯ2*2}|)&UIvW{(0%wD8ːDHN>>P ;rU(?{lyVYoi^v(ŇO_E]''ݍ"t_\ vd|vщ<945!`{SFCpZ=wF̱.$)Y M]Wu-oZAEKΠ-pEDD⹐GtQ{SZ݁9 _%[Ͱ4&X R#]j?X ];,4E0;t*I|lin#F ב5T [p\{ wbrBmj&`/"f'SwR-CoI@ `$c J[DPffP7P5w ܞfHŵ^N/!"9D"쾑0 ;!}[q3轱?Ǿݠ%2{U/FeΥ?%hTqeXw$h%c^Gǩ'",= C _JCyd=bVM=Rآ֣3etInFFJU*;Q/D})||EW)gW#w)hd}LK-oӘG4̯j_5gZ"s*Ud[pn:L%fK}gw7jH B tHQ%BZCD4:8@"0Wc{A~KQ]G)ž|jXD]OhՌ4a# W@&Cr`Bd i-BL8WS\LP7.J,{6 R`U;̲2RQ55KT^gnw#9u 90qiPNj[$/2T[G1HB/GՋTSTS{^`pI6AW%6TMuQ@z2t0s/T!)Ũ1?zQ$Y5cS0xjz mcf `|)1 @3ճlO&NJ46U!/PqE%v^&r_ ԃNqıLzJ &Z@A^KdT2*QDk hQyLܞqݚ ղݽP)S 1'GQ߼V8N;mtl/2Jxc/monmF&`Zwo/Ų,60_{WIJtlw\.A-UB:[OW2]_!q̡HsC/͊A}Yޏyl)+~[ >WI4ƥ7q OY˅hmFD $!(RI93芝A olcUe׾rz[[սJaL>Y Krʎ_Hc@Dc؉YwNNN<}ϝs>u"ͤ9w@S(D RZRpܘkj%B!;1?248oy*`n w' ɀCCކ&: }^'3K+S\yqtGSBʡI\Wx g VuqC NqR47tmD7vFSu'8$~({7Q)?^>x!i"<9(UqW5I>2_ OclP2JNĈUn$%ed**^ -e+xm>RQtLȰ^մ[*6>vC@#𱫠T*&"\dܺm^ Km٣P5 LmlDhO7 -P' 99#Gz֨a7zv=s:םs}Dw}ٹ\7_!'*k%Ϫ1u~f ՛XJ(ڜzwIi cN![q2a~_r*umRԸN^uu+ _rmn*d6($پJkDWqh}Y ekٺ }\^:јH@g^=:yWOÛ_i4Fk|[ l^lySTL2܋BXI VPt׹R ]x \Aӗ.,lCq|vqy8> g|Hrn߸zI8Jol*_cDfjI=hc!7*>r=%86t/~#{96!^O«U6x fӛm#nZx+WalÜ.-~s*P8gٍt N.ĻPv^oj:t' ݛdL_r sU\״__!U4)JLu|Lk*-rιKJ(@N<8f㚼TBnw*b8nvj%Jt,jQk_ 0x9dȰK ށh!8-9^ŢG¢ߨI-ǵ Jyq7m ڨ mأrǐy*X )|j4eh0Ji&cS˘-a>J=HX@di4I &wY>o1)ݱ!_=\ϩXrff.u ^;$C!I-;Wz*{]MNl47;L8jM t""5ըTET7"'Y,bJz5T@n޵u$׿"Qm @ I?%KD˖szCrw(Ӹ@͓:u**y#%):Y+k =~R3ʼn$#ijV>::J)]:ci}`{R{N*'W:42w6Uz.RJNp]4&v(kpN`׮N`HKjNTIplUJM}뤝SHjvch+Z]lXQFepʤF/TAŷUAkG{C:*\Ge||֎{@ozIO8Ɯ=y# ,fRmAr0آr可.g ~[GoQ$Se߽(VpK{syZL3I.wnt%ӛG,OG)zpA]5lу² =ԣVɶ9zUbr}W)hI=:|g,ӥԼTeM{9`PjT)RT$+4byA4rr$jK ]J)][=xcfqݑnȍ2ƎUҊmҡjrS'xNG]kqq(Ab #iLR-`8ڻT0U~ kz͐GCzx#3 )>)ǟiW)|ѝ4r˷7QZO'2e&6'õv4ZO~tyg.]);ʨ-_xԭW@~[=2`'Q7`c˶ۚkZ+6Ne[fM- /~u6 fkŗ'4s8k=9<̢΁ S q3",8sRzLSvx=r`CVvpc œS{Q!H'N}upS del:LXZ?*G4sS]y]4VDUJB4*$T%,t)'I-2UX؏bz j?GsbćCbϵoׯ~?Ѥď9z>^'9(-uoklQG`^?y"3dwidjF )h$S dctNv72/J#&xEJWMД[,r!j4CĚU&Mk;#* >('?57=kfyЗ)QM~VaΣQA)snwz(m]B *NGctSTtJ~ 'uyW"ShFBUNJ/sUzbNGsݜJ\UUQD|Ϥ"(hS ڦh#<ϘzrW06RF7j=zZvڤ)m"Y[&B1T̋@wG,=Solu^{~Q_s'4fVX̊ =x;'@Υ)Ro5l{U) *OWnF x )ZHS*vKX[.f`LB1*\j9'(tʹRSngI{;43t]rZ }sf4qVjhPظv :;Nvyy~TU2NglAifD꿧u.vB^u(Zz5NiWαL9jZz~VKϟ|*8ӫ;RlNg+ozٛZ 7?㛏`+ {?+gxˋ7v口hn<%^9iڥDkfvyy2 =˭{>>Xφ__ݞ|+WϷ </gxjvﮥ𢡊~&zݷv (xMٖ_$F=fE ?iK| W~:;?+m#UˋmO>_+<;K[[~6y:~lqP+OΡFћ7_:?,"Xt~o.E =t+^nA^e]80~:\},mAo~\}ޕ|ݿg`o>&_xk~qe^ %~c }>~ߏg{o AzTmᄍ(;Z1/ɲVweLd\Ľ.vqmyw` s .ݳuBmJA%MWOLΦ:Nf(ߙP(P| Dbs>PN0$ :KvBXæ 6n^)sp 7@Lt:`# ~ѝ;5ق*em-Og ɗ ĘOk6ϨE"0Hv@VT2d}%P,FKj TFl& &K SSd*$(Ӂ q6PWI$S zr Dє,)6)dB@0lA, ؝Q B; ldl} i>`{R =on$1堰`SZ]hJF#`h6&\muoAّvPX&O04>d1f9c \0fN%qwM7 vHj:搰8Ë,jH>҉iݍfp{ iBu6j;[}ĮU]Bsi4@xFCm- z-{-(i;,E. 
ˢA>֚4688ylIA%c;(}q ;- AJAFUWⵀA>3X425½U󄝿< {p(diG7VFk"j]qP hn:V8 o;vyv^ތkmU|;6c<)jH(1Ʉq<)KA.tqT{)!VP18#cI uƋ~ !t[ik -), ׽IXܺz-#aSiP7!'Å5:2\ bvhga ކ4\Ҩf5[܌>,,Xd˒L;06fm7`HK$*|6u`ɶV'[(vKSp B~Gӆsd v\I ;NF4Β[K݃ t[4bcq `Mat #˒f|Dh Yi{ĦkMDgcaR aAI$Jq- {S e| %2w[bH/|RSyܿdRA_ə; ,cI0i ҰLȫ]%% ܹbA=& B^קjqx8#5~\(v LbfZQBhjά_knz_)@ &ƠWKD"%)ئGR8S lq9>U]KU KlRK#rWb4$ƻ+!M4"hM8)R~&Z6W\  C=Xy+EI#C@ S`zARn%"!1XȺx a`^Mq( G|F4ZŪkYcu;UaAna=^Y.0(,^D$ȥ^;섇BI29 7z߭sx8ݦKWL0h{ rFt9@狝(-30[|EԢ8A`C"VA+>O?r}7qf^ʱK#/H8Yхͺ",>Ա4;Stʲ$Kqut0@*q#jDVK{(r\Y2Ք;rZYZ6܅~nANCI"iאrW,O"]>y.'1Tq)"0bI,3r41 νB R׌$ӵ5Ҍ7o<0TpA&lm}m8s.&3_;ڿPW-&k!.oi<8,?pXm0{iCpX>(mvfewDo ڀ`nT7S^7"[Z}zҭ__4>j+ZUw| /_Sܝ-خ 5<Ώo5fݿQuZXGi(w 90~?ro m;;OkX~ӚW&i;X_ķ o)V|Kߞ񽱢U̵,67ԫ:ǯ'yWܯ_ޚ!_\aM`*>QGfa~AwwZMC 9e]\9+Iה/ZG,'x VPˬ.gմ ,T, (Zyil+ԑgvvᆔXo&%~?߭Օ'o=R.Lx/]A1c;VU1 M?.ދo-}YoPCftz=:)g?z:h i]3[FjD7bawc fwo6:֪~ywo/B~Fd>tP[̙fs%6$?ܞ'MroGd <Az[o]xTz EU#k%c4Ax%pu~^y#]ݞXkr2 JFLcuHA:޶wkz HAbSi*l &Xyi)#4_)%խYi`\F|ZֵpA [Lg)0Dv4ȟ?bGz-gǗ9V8o4a]kw<+Xcr%kwNp鋔Q>-'vk.bh2{Yvǒ7ڎיCVؚ3Ű%*}?eWXoZ16_WXHLJ%^>I@|RS1txym= 3!EϐTjK/,8[G?guHP j2LCt|VH35JvJI>0^wLf>dd@o^;x]X ʐ{ܻudz3ۼ$kAk6CBz2Vy~S&ѕ],E6-cjk[%cђ:rqc[ L5ش IV<,Kg]4|Y!||1o*Zwl<.{oBpL̢f4M^xW?G)Y35'{lِ^bu#46 |:de5|jBҖ!^ߦI7FŴɍ%sjr:!,=6n"<ԚMU'-P;r f..ek|16dctnS*c;|Ǖ$ݎK%!ףm<f+$8^ϊ,FoX5f:z!R[n*,pEz0/RfwӋY,sfv{ wÐׁBzC ^ OsI`HSRoH||"Uחwm:&/Qi[n}2z<#LX F2 7e= RMK+bfv}.:[yMD" %rfFzС({:/"-ǝyx^,ҿAAzx"C+٧* Qɘ ) *JCGK<[ؙ@]sr򦇤߳nÛHMe dBZQU/N:?lXEVH+b;+ǿ|ZȎOA"Z~[ܐRo7"sؾ^0ntƢBG/}3QI#Đt<^en.b"_流y1͘=n%} uzV*(iP[޶wpb.dDѳ(rgyh<1$= }EŢ0<7E7K7<'^~YίU,F9tU ;`TR/<.HM2cT# +J0aMXݫ:*ڐcb~1a^=Lz1Uu߈lBh5VQz!z}7M<w-m, n2Լ,6ͽi cc>d.%Y {fR(JV5]w9C^& PS=߅.eŠdrR:ǺGk4\cXRƂ!aƀ @"v% G5,+pOf=pvH9$Oy>@۫v ЀAq(K.VIx;v*0iovlR;`0Ÿ99 ND!N4|cS6(!j-|B24UpOs:qtFg6laei$6Z'ygb),Q0fBi2"elGhA"S掍r,3,nP2(V*.XuFupoXE ~\'Tmnx/n tgN0Y {soHOA WQ __o71nZMC!IS@@;T+jƠ~}8)^VxUWAx>z}x+ &*Z@ DreZk*埪ւ?(:4_|W2@%I GὣX\quZ |:m>hZWkB5E(+It's ˢP*Od}Iw+ph L yfQ;Kyf+=NqZ%~ ІjRaSw~=Wk{螖V@*#oHS~OE8aSvu9spzGmA8|$NʥI> >;,S$r|'sXukO ]p+g~ީT'SWEJݤ@2JmE#hĊ{J, FdQT.m\;AIgG`˰`NN'<) 4d [Aщ)d펠(YԒov_޸-[K 1g1#3fM'm$&#Ψ[Rh֭yR<%[fb_l΍ {4_/7*7-۝ VTY OͩáݶCvIQ퉈X,FxgH70s%uboZz}taY/:n?Q9I뀉vtWo $l⻄՘鈢J`2 0a`Iũ0Vs:G$J'PHgV+IcԚCpyk2Zb=˞儝x[OHS&OV"OAed0. m|ƺOZ8|0$y%&Rfӯː LYCH8 4:[wn:rfGSs~b*`훪0͋* r: ՗'$L14l/:$d$8_뒛Rߦq,C2I_V'q؛Eݦ^rϡ@;C#Um77:ViwUolԖ7K;/;o`mJ" 13?_?#";p ?o^퇾pOaU X؉ bʐ˕iN|Kl*.gyVl&'Ø QXÖ`M㦴#eEι@giS[AŘ}*FL0|ukIoc}+&g,xn |Ptq&" 9Ox>J'n*jZ@mLB.G. UQMtZTcdtnzWu­cɪ:67芌nD9ߖN'PG(Q`@s, M!G'xtm-8sӋQ=ũA  Dh$zO(qФϘV<\&}c~T)zw[u#Ծ;/6Nۑz.GFI=DbҨt)(ms*p%\:L3Z[Ev[KT8bbC,&^ HASΕ)c0#2)Qqg[u`26**h| X )*]'tK0`ۧ(Qzƈ^FkT.P*`m#n#\-ILDrT F]=Pl1Q8t'֞W- j[gxVpK:z` ST"p50%NFvd3zzw{} w4nۃ[g}NB4ӵϼXe-}v<^>ڸ$$ѶB\vs)" J7Q@Z .$[Ob`/j4np¿Β:R5/XzfO5ڬ턿'Oފprvb+iae`|D/T&`P0ZmC̿:/Gn% m_r;nr;OE}"_NC],Cq~ 90*˿N4fUk|q>o  Z辙ky 9%֒o-.; kaME* ,d`!w@D2e!adΔdpI(I Vg<5@p\0 G ;椒8LjdbNACP em^L<ѬtDVJpZ`[?ncޓX D m&h/hg$|+qAY$ԙF2\2yJ 2븦@9)k4Km,hlxJbZa.oL+p>'zD5 yYD͇SFI(&\PPf* 睝Ug}L;gk>,6,Gcb3 V2d1y\p)s>r؊$!" 
e(3ueRє<6,wE^+CΙN$Gib8`&)OKPЌeIL)#b kYjS8Xd>la3z@~F>b8b#$ta C2dzt^7 -06ZP}"j`|-$, * *`LDxP/әS4]]q|)1 )))@+ N&]0A i@ hPJ `L0Ptp*U᜕3,aG}T+J@Ʃ#Q1,KЛ.ܒ!)AHuH*ô8']c-b(錌,;4 y9mK41{W!{sdOU앲y\i{޳};tw}A^h}G[W.JHFZj)ѦޮVUǧ*BNQl __ox>m&ʱWۊ~7Yqpss`W#uZb7|rM#;2ZBA1,%:݆8o*浆Ƭ|!/qHŬ8O/ftYdz:sP6u$*vD[-N>hRNĂqUz]p3^ja*ju/my7?RG" >$|Fes&\]<`& 4&m /K\Κ.IK*fq?99OvXtt'G؎#3 آ(v;Q]܉b8T//W(%sW+ Rs "aZ 8=|iRb00أĄ!TWZ0K8t8t8ty8tst〣(CJtꁺؠْ%o0Y ԚRضbaPD.DNO)u'έxv9\|CC~)aD߃s*>1@t^u* K5(v)l] &AoIƞg>Y17_M!Z$ 'H$F RZ/ަ& 9|I!VU{# \Mc¿Hbs}:P {/gKҴy$$珷$HAʕ|d6*%kA-P͂jcN.[گ,>94$[";X9U\?.h]F![Vj[B!{SE*dYE*6TFS)֔h1qoAsEWj)GZdթ}NDj~-GoE=s#LV'Y+)[g+{ -٘]niC%5(ۂI&&!Վ4Y;3IF.xyIl)u g bj1oO7h]#:03lALP2S]jII3n-evac U8w;iDUo[GH{T.El|ŦE2Ym_wd'7_jnf}]URokNM3?o|pwu) />5R2ï?rQNe :o*+4KVh~/f6NT>}إ 5wO{l7w ׊nGw*kԿǟ~%~W/9a^VuV4>ϟWaOF6v!ds:S{~urƦϧ.íYsr;p<^tZǞ|8 O_uӗ]ٝ9]!j ^@xuݖ+ ;U54h, MLTloKg{#@kToԚr &FN* 9N"uvȸɜC|9[MƢ8 ܉%gɡbmP1B;d!1z멩!-_iw[K=a`:W/U6XvBRiF11Πec),6T<<셋?=>wbϢw/mK-ٸrV+Gl Ϡ/d{3}pb5F>r7iws3V W+{P_Syث겏V}aj"9e! u99zMA{GG?pYǖGFu_ǎ|Q1/'C`o4Fz^ue B$ /DHv' G_w+罃cSN)2u[L--J66gbieY,,TdiCU\׿p 1 (u+Bԫ= Tmrчq.]b~ D=Xk9AKMa.! D.꛵nU9erNb<2u{3 BX t!jƸ`ǡ2[K=w#R&X1.wRX15.HLzپGWcMB]!࢝,.X뢟Qwd(|SYYYh&Gť6(#KfQmP ߜAV|lEڹ*ce/;j6\P\ΗtIu[!#.agi4xVN֊gns*`ɥ1JddG)㐤*fS|9{TW0*5@?%^Z./֊iVTӤ㹚j=qT/MZ A( iRL9rVƑI`S5BO`Go;JRT8ӼDa1g 4J0:fifrd[эi\ȓu#=A7jkP4E > EQKX4df4ctT=4GS8-9L6֐rч3s|z87T5%dMPt7_*Bޝ*i=nk6,mA6sU8JQ>@URXxPHgoE/TR3SwZ%k}Q{&?.\H?9~rׯvyg.Q"v'`>u{Bޅ/*C!v7~> ;4hV-c gO^/zi˷}k|/| xS Tw~_P`F"g?$a) ͝qZ^e^&%)Ri@gNJH8jEB}Z3"qOJ4xVԶ؋ϻЗgҭk_ F951ML3Hsg̑nQBͤ {of"t3gzHW<%ȶշ28A&@Y,jqWd)m%1~4kýO+~ZV{g+}#jt^*J\ڞ([<7 pj︧(XCl8g堜uA#,>Bzx0GS&+a"JBos%i|._Wܿ`)\LJ^X&t(t*+pС"U0M'N5ku6d1',@թ.d!Hֹ:NzSC;oT K2>eR"(U -bS($9@șllVы/'_!OUVtj@o(g',L)hi[]e֘RvUdJ :FECDB j*n6w:}sqM-0i#>J[!"}2 `B2,6EP9F+bC7D:)A(HrQ=Jcf㱘ũ y^Lm{*PzkRji]](Gb@Ծz?C( T(+bH#a2u|B b4`Ԑ}>_*ԼIwHOSٺ:`Vr<`m:""팑P3e}.<(Hٽgz;a/=J;)Gك[p]8tZ=Oẗ paI^hu!;Άd9>Vhq;wќ#(j_ sBŵ>͟H]QShCԠj;g\0^댴-' |$l& zCy$#/0)iRqY :æo] C5Pn4h:P[_FU"eJSUjvd%yTY܀%dd#ڢ5yWX R*Xjmmleboy!Ktb~sozIu xih%lxL*FQӇGgBB*pٶzQ މd82Þ9u&\VO<}v KS5Ք0BBY- J-I5"( },Mygکk_m9h4dÄo8M[7wH`>>X&j1Fp%M / csjXI8N$ތ' g.Ikb-sV38ל Gbrjqh.$VhG{ rZl^/VfhtVˡd(ШNMAh L1D]BҺ )Ko ')SPh !v.^?t|"GyH]K筞 ׅKɱF@>$%VIQ1*am2XQ֐!UC :].QS6=:⊎ꪊ TUPF1':]jDJN7N6mlaʶi`# Itfυ?z??>"i%s+";ewCdws:Y}k1gA8vYBI!g ",h?F&UA1Ǜ QSHz(ț쳮VEy)0DRJކ%\9:~՛%33)G?8'_Fs2~@dtq?ޥ^]+_4sz]w!_5P/p؋(DkhsKguoae;bό$fZ~=ޖd̐o~ ֩锑,7a/ߤu^뿁~ay(?no2څ;fxXGjkXƿ ہb(A<>7JI/tW];vqn;b ȪB1VYI[N!9 ɛ`CRB [5֮tg\ry=T/Lƺ@G"]poHxQ@^IyzYDlO}C"ZLF XL` :YU%xENJAϲ [)hֵ0غU[@lIz_#SLƠDj+w~groq^2fK1 j!ƷT+/N|t;= RRX\k mKuY{eKKdȈvhdˆ\ {e Q@Nd2*E*XzͥHdKVQGewF'zAꜴ *P**MN\ކv.K+IyRoo ޲5jO jS:? (N.?[)˜R1"a}"UaP+ x|.yPyyUFv P(Ѓ̑$+$We8cJ(Y oåmy8oַ揖ZxS4**J A 3·pZ hdtcU gJzUDȹ}?ӛ jv5 ŠK<; ]oM?/u9?vӣyH0,)FWGd~hNw]yVusaFXݕ#[ CEhڧo?8&U-VSE],JÃnaA&jdExRRX x%a'7J_gy;U , WSM2 {l u)V쳆D@w^꧛.6E9lʎ6z-6vSeHAqn,*};m=8? Dh,?{WH?&f,h @Cȇ|HpbH8뷕=;俧hɒlݲƾcK-u7ޞXd?{LLe* 싚>BoD|\c KA>a=6XTBt.&1VSBڤaR]sT_KA(+ݏU.fZ0=xD2f7F_tl]}}o8]ə(Fu˟6P1(^Mqy6vmk K(ye.oo_K?Oqqe+V1O5)Ψdr`[K&}'dV%JdZtN]烴3 RwSNv-iv2l4%LK4 tCW)5Hawl^93qr̡fטH\e؀ȽU^)AD<_/}4<ȫwNtO͙?0!}K) %ۥ>..EB P["D"`̍_Pv6U`v!+WQժTkC). @p{t GjOBE(GSvu{&;-(T)ͥ+IJ0zV{˽Jn8L8c&ۍbWn^?0]-囼MGy<'&2LO/V"@R#=NjBX_,e]dT.gPeT[u׶)dZWU&q,Q91@R .°Kqu+KyTL٩ +g,imrѾcA&y bV QJ^L4%"@U>Djp|rpGOTe6'XʭZeEg @J2Nfk,\1!F7JY/hJE YT&*&mY4s_4Nq/垌+|y,(qgՔ~ )@+Uhɭj޲&'iSܽ|xUWs4\uM%rOPd  I2~a庯.[ءg,Md*ΰxA4 !6׺J͙QHLtK1Ty3Р0kWx ji}@EĠ2g.9X/45*5KfFb> *[_ͯD-Z='S|v"9u;B_ z/?^L*BQB'$T c+abw ]]H{S[%cTÚ]+,:3IyjgVIHatETl1ke̩eܓ)1Ȯ(>5Ŋ@LB&)$bOp?+WsLsR_lRMsbaUAi;9$7^^hVJ@D͐,+zQu"gm&)>xD&F ZkTQڢ7f! 
|֛5= `>hxQ>N=?Z4O-ߨ޺3Lmtv/KS;S@z·l "?kv-m@{%/ZkvVYD\&<[f;Hv|^Owx~ü>g)wG[3T6* .apѻbb\4rig,zwQz<%ki!T/jZ`Ds cX,-{s/(mdP {Ne#S+#dc;I)vse?Z..~b,؄('q tX D<;hʋS<{X//gЏKH:E%!2+Ш]0яNoz~{ 7X0 ޕB!Eנּ9j7֫S7{1ȗo7Atqw87À"`׶I-:_{q/(3oM[i37IA(qU*e(ݨ4.&d1Ӫq 7`B6SR՘-Ŭ6D-6 D =*$ES#4@U{}; tA= kX$̢`xB 5zuFυ4\Pزj[KQGdt )Lfѥbj6.g#̼qCƁB3Fc]ۢa;>ؤW9Ko9]Z^brYګ61! ^'X2lR` qwQF `VuAԲ,; mTB F/*->Ol-%VȦ6JW `d*KAF\oS=h:qf!=G2[4*~Fy#; T%YpsƊϿ܈{у,7Gjy5VjR>a.9褝cʑcB0C~n]܄ dC7 f6 FU4U)rIrzlbCh'"KKJhkʄIbry󁫦@O bi/޼Hi5r-djoǨN)l٨V{iRr[ZΕLӚa`޺屵̳3u%n)CxWdg4ՐMP/>'ٺ(1F8ZB?mF[l -1=|ڒ7Ζ99%<:6u!HYW_=yk[ʙKR)5;(2\凗`a]i|²h4.$tͤWЫY+ߞy0Z2=Ԥm& z UCXCI@E'R堛GJ"ƒ#G=ޒK]_n)k 1 ۾#ڬ;-NDlBW;_G96^˔ w? jBC[d8r_{N\TeةwΞE|nN62r7O  1yT<h IP?ģ-{6E{?ǟJZ 8ԋ|Vq=jq77vkvG[˹ڐgB1O߮ -DZgׅu'N^"5ݙX'hwO9KAwszC Z^|of,Y@п,ygkF;s<]^{4.HbpzV"!_}ٕL8n4y>ĦnN"+#yx^6eKe,_[@F| oNo@jMhL2weHz$ 0500= -uj}x1}URIҕY ؒ2 3/qdH1 ,<E`M 3v.1ڟ/mXǓUUľerKu%CTºx-^^[8;]J4rz_76wZΏ_̉Xpڹ|}8!jwM^6} фG 4\ŵyD8C=S_,PH)6eR I THAW4򭠐i H׫b 6vg3%G@l#5,9jF@jMbQ6`z4g' Hs-ћ=[ ^譈T {)zӾe ᛈ C#߾rqakgτ78㷆]\% [KKTIrRZ+%TMC ~;>U#7|Fndܒo>KdZ38oy7-{QUJb1鱱18ϼ{f:Z٪ш`C1>Wd&ӪeHxS\,&蓉!nz\f}|Mq:քuDrkŊbB dQШKޣe촡іsKKJހlL0FrWӢww6oH(9T+EPq/_ۈ"]c?a#!rd}3u G9/Z tm[,M5Z0lOSh`ml^гŌ as&LOx?#ލW.)>1 essB^^*&mTN8NNB#l5VyK6kM ph=qPtc 3Œk|l0C+c{2\LbLdAD"^6/Yɻ*i/>|EP)+8t0=v>SW*e%vzUǯlЛyꄋő qXwNcp${N!8ʣ G Q#pJ9ɓA!JXUZA)O6fGsV_>9=1 gFpɥZЖ@ҀVWh)A\`#*=z(їlo+RdfLz1RKѶ7E#4_=A[MۉKq:g'"^Dy(si'4h65v\W&-eq,O3,:i!Z>ыsČzh{ݸ4֔CzuޒM%+tZtJĽkDkD!/Hf_lpb JhZJ&AmAQ0#ڃ%AxTuRA1Y[ N:h ѫ G߬A]=(,i](nY< @ 0l3hgvUU +f7HZ̰NiULicO&E`[T5/0FK \ets4Κ<{g'P+V Ճ" o]rBU::dU.jZ8!$8Al' n`e."<Ƃ[ oٯg1a Mv7m/.i(PB;Gfڭ_T:lǓ٘A=[v/y{K<ّsPv 1`b|?RC\A%-V>H~D'uN>y,A[GG[?~~kZ-?l߾U^Zщ(w~gQuT)`ub k#Ϊ92L˺Dg#HA黌4x>Ccec=C-856S〩o*FE5#3VGUDܺ}%Y1J~1Ѕ/)JQ"_ NT͢*EŐƒ$%kOڳnge!yOLMCe4z`i{Ҹn-h6\AFm3/1.ƶ9mB>0.L2L7h\LP6@crh˹g[Uދ6dQ޴ >8%DG@h=GVդf߼-Q+dBjֲ %Wnt9V ͛WrF>w0΋+Nf5xh&'+v̎91a`B} z/:VlI L R ĊDIQeDxB-FK-s"~Mv&E&ء<{cKbARM'ƖіsG8Raidt(Z~dXr;VNLdEgPZ7ɣJB@2~2~*f,,i/W[jH4^R1r$ Ny)1w\rF%T7lh(N3(kk"⑕OWO zkJOq6,͡O OK-2u<-V-&Q Ʌec5k.Xbҳ]%u5mֹtS@͇([[UuabY{kS2Qz5g֭ 84Ѣ'TeoְOyk)`c jV0\*8Z|QKld=7GEKF^у朱6o}➓;wc5 +h+q=ĘI!`v 9t&!oɻ~ܜ%Sv.D 8~g3f,>{{Z΋jׄ\ u+ k!s@_G߾ F/7 ע(V!HJƼz;=I-_4[?4>>w B=Ԝߖ-(A B$TߏYwD i-Ǝ-B5L8j9[}FP@شOېS1Fc'{_S/&aHqr> ~I6rMZ+{rMA 4CǍqbϏ_Jh<$uq؈I[˝{oit#ukσY4᠖}zOXmůa{>↞(}:|ŧk0$Xc}N9n((+)JJ`8\ԅgY/3$c e;.^/22bڂyr1hm -= q~F6N3S`:KψBUÕ~~L6H.JS6(T!l.ƠMl܂< SX\1^B*鼣wlм H4S1[BHq!a}4D=`idiEeL9:zRexp; b*!`03p˭1Iq9EN!K1mR)Y/uEeYt-q_̑-4M_G_^_eC$nR],FH1qur,ez]fp9+99ғ!%:^ }V!kt !訔`ӾhW+*X颊1d+@mdΐٛ)!f:8p`@s3$yh(ys5Y)ge2!%b,2(XHY#$L6ovCkQt+)PT8;d7f3:/v֚{R-%T.(眱>CuWq}sqK=hR'Ceؓ(.DJ|icX!2(g܋ĴTA|zm )pa#,2w6pY!q&l紮1%;Uj|O `M6++@N4uGrmpـ2ʼnt;q .BS=),G~4_Gcۻ)2)$Ђ{}Ur-^Ek)g}pDm-IR30m){қzQG4>yxN<'{h\leRH$ߧa8V@i4|uwG=g50_[SqEߓ&k~M?.MMju4SԹ=[١Ux2^ގ?F3x?0*7Fto=8+|Oɓ8WŇ~1YbˣgV?۟ ABb4 3J&ƐKZgWI/o QO}y>C L%BW<9S]Әf5( +&e1Y(_L0֫0<1A0SdQr $ۈW8'djۼէ`FSa_d? %܁I^ u1VEyc}'ro}0Ds_ӷ&q}5fJ/'m I"#"8!qS?ͧƳi>Y0T22ZҡFImՁ:<}kR`t ӡ;,tyQN;iXHȂ2I2C$E).V^66`#9Ňt I؄p0z&Zpf}b:ٗӪV]EVM:KpwmFW@}Kԛcd `jϭ%m GUqŨk©a}cLq.ž÷&ۀ-NM20eGҧ!>uiA]Ӧ9~A{+OXs2sMb/,cNHk9 [ٹ^yVx1Xyiz3FAs'_?ߵ `}CEg{p& Ls'+XI,q9ESNm@ж8X^_^7a6K*ETqmkL-93-R6%cSjqҽKOdd1]p Oɶ j=[pQ9'Ӵ3h髴=9 0 _PxW9 ^%ZFK;M.t',]"<:cXi'˙4_P)hi=Z5?C ص<dHC=nɶ'WH_~C{`oq@ףi+$N47vl+3˫gwf4&j!;HK^{Cts u:MC裝'@Radbv>Y9fXڦo JµOLWb舍)Z)4dpHx & BK ҹh)"Pph3̡Cd>KZ"H|Ldj3JQ!ƔkDĚ?d,fI[(-ZTPx="6/C2zX)A+H/bM|։΁[ў>%4f~Sbip50¼k,!Zfɍ匽ַ\dI 2*F m)AC4m*l_^W9 6>-km.ǰ7A&RwfZh>+js+r!*ֿ=^~^La<>Ct %y"  ׺ p*sN$ƔC!Kַvޅ?XGM4~uUTh÷ܯvM6ut_? 
@V䕖DR7kh'0NVΤvXQ/ Pv"s>P 36H2[쉱8s@MLBdAPkC]՝_s]NRɓ:m>m6N, )= $kEL|EbPN|;~mкsڝ*.[1ݒQ[.PzE @Sǘ,2fIuhrk`M$Ikr DR:ԨPԽz"KN\jt.֑J*]$ G5b7@PnwVyqQ_مuiis~5?T">y4s[S⃧:h*Ĥ (ǐEִ"RrПj!5C^ByQF!8!+iL*NaC&13-\&Rř\3P뻇VXD$K~Jyn j$r쳮M4\ee/(tG/tA+{*9P{)s:lP'"G"";5|BE{kfJOONI΃:rRݡZ5#,,=9у+"/L2+.I#9ف%h)UU@+?3v C[Pl fVg+Y1(kNٝy=;7ٝA18Ӟ R\ O =J(xS fwp8QoI-@ YYAxBD?pv`ٓ{4O!%8 CʺY$KчIJcEIE/) mf $Gw7s9Wjo~eߏ1' YgAwWYk}ɴ@brZ(>Q5%Y?izӽO_5 hRB VuiƤj8Лۧq6bd$2z~,hI~(S 1T <iU&{Bl)01z1)O™Ŀ*oqݽj> q?g~4i̖ek>S9|7_y|XڇXҼY;2f*M[.M{iU^L~$ Hע9@pyl3-ާl[8D\9>L01ֿ V)@ \ \Ēb ۉ =\cn e痤Lv#G*`s`swuq;qHI*.beq,eFIekcdhI0f9d<8g3PaU7Ol+FAFay[|u"oj0P47csҙ {e'0 $5F2Ё( :S.BaJjjAb yt6^->:>^AWپ7W0wmґL2Ku'5 Պw|vdp*C@~x_:[ͷ΋ooڽKؽH<. _On| }R<0қae$#L"3ԺЊ7mi=@?rF9s#C7GkhghHQI,Xo57D |irqlzl֍4q twt1GIcӏ_q9'/1sYxqA_H "E]Al.BBH {E3-Pa`48zDIk7윊2J1jli&).a(?`zWvnV?*#*跠'+6/z|9=3PP{,Zp-MT]u;/Ks@/Kϥ/uƕ>*HJP BcYYgͥ3jS#U,bh1P[@BNE T$QLYqI 2ڝK"Zm=4zln?#}: Em\}+d?[,^Oeۛ+Z0 vbo40WGraG!RseNՕL6suLycÅaC\'^J|i!;"^0Ozqz\٠?[oO_q҃sza(oZځ#ʢvEUdo}Ͽ|N Wkª]v~xl^3}یf3&ԫn]so#۷ew\?-J}ϙ&~]I{sUh~X[q\9i +o%*zIJuP~ 7n ,BQ3N ]ydCa=ofeo7<" z}lYFS2.5bB0(UiV` Fν~ - hG $ mKIϮr|/D$]銮jRA?2{Wɠb4:{}ԩ"q0W$lE$c^ 36 'yhչ$/Z߯չE`#Oivj|tr6sM_bR2؂:&L-84jU >Go㥳zѫ =(bp=%JS5@qR_1hĝkA@ˬ &Ms?+y.9U!G9Sz pcfVptfPp(cHD{9J$cVa,޵6ӞW!{V7[>]Fmo<;<9q?.gc=F?~VM,5 lǜ|i;v$HweHo>i 6Ff"HcV;GIF!g.M XAW᳃sl'W 3 ?W\-\ Ɵ}: O_s<kQ.I(jյ! B7\/%f_Z4i9ވ@'Sn$H"B+G0ը0\T=hi4`CIBeD| _܎yٹ\&6zУkwqbFFt',Y]t"G#B3{0Ɯnh##k*[*QhD,8N }cgWo.c014)J>|1u2}CNm kءp9TwJՅu<'n/7ōRbQˈ $@5'ŕ1TV%Ȭtw|:W䯓Tp<|s]vG` kyKm>QgYR;8e rqJڙS ~7d/~vyQ} /擦*M1JŚ 57o` u{5# ⵍVuP)3Bf 7&@S[&6'EY!V1ČkT6Ev"=bK7EFo{!RbŠ XR ԠMҦ8RΌ㔨Qz'T;[Lg4)4rq#\GBΠ^`GcԖ4O'%wbo;U@RD"2Aj 7Z(p3\hyD3@bs9LjMi) vb [R0@ @``轖]@0ܟqܴ7o`ڧ>pǃ=\'jppÝ|29׮yf ]ӋӞur O.W:[Sc _r~.>M&aZI}ZaX8B>וAH4DOǃK7g0wu%Br#Iwߛ˔zCozo*QKihZOKg|q:\1Oѱw:u~tGnn α[qrSÇ)Ahk(v3}cWx٨MgS8|S~ǿ_|:>gzȍ_Φ%ފ, v 6 C5 ^%vf$俧s4%u zz&cb]%kmdj"itF.,h_ɾ0O$/m˒ p4 gz?lJBk`l4="hSU!$[,j\ȡ P^ Ri0ٺ`IN>(<! T abk }`UZ7>n>D|E~Q Oa.͌&}`Rsic 5^1 Q>:9˻ H_m/!\[uWfd DkH) QC򆨀N'$ҦnrOnr3^^5)@N88/UxmiϏ>*izQJi=]]MPYF.]_m~#uF-'\5螁o<^zN6Hӣh}]jq}ĉ%*EXڮ-|)9^d:z6Pq` JZ &H>f"l :%bydrb4T/хe""QHVY$/KM^EOrIii/Fb `"TkP2 H/ji%vYu+rJl5s}f709hZtoԼ8^lړe8Lp2:]\u37Q39":md|BȌk^׼$^5/'J%tttj:Zִowi:7fB*{.3&qy{w>cN~kO_v/?r/ː_M{v5 gqJG]/Te]Ͼj54iL3?EfsbҌ QL>Nu㦖htOjs+1I?1[MH΄qY[;Bh c6mDBj04!y@pтP"iUu1JU?xcfL(1&עݫoBXη.o nXy>my2q3Wd-(s6u)~5ƀ5[7ɯ>6nOq{nObq{6s c+ERƒF9 XY2(w8,; *yfYBJr9XhS.Jxi˙ zDUM}\#l9&QY DDR`b٩hmBlNY+1ژ8m RX`urAfy;,s<{i BWL&u+&|q^Aw6e`465664es0tˇKY[~p6)D2+~5Y[ }V2,Ԭ;41]C.v߸62-7/:[5̮olR,%y Qqh=kJWytҼ7MYIݏ88r˒53%6'*Ҁ}sKǹQ)54)sV}cwS_?}tT6Jo6ar/RL*R&L ohD虮!Q kbHDVT0 $?yf6,诊RsJLC D*˲NS@ f1>,.VoU5ްMՌݢZwOՌߛïYXjbJy"g[͞R29e)j>hOs)%*LUclDz_x/s#{ |-v\@"Z[RRT2=2AE!VjKICΩ+BY<'+B@yWWfe3'bV*Zg'`ϓ7ۇk19-ZkoQ x[h CPhg( l3x"n?@ /ew}?8Ð!P1Ve#ZSR mS؁g'a$EKr1b ZjM6LY֢?s&l $!c,~ݽMW͋*iYd8gyu׋Sg'! pMT:b.skjE1ODIЛNWaͿӯgLEfMMR"3r?UY `*hQ}39ܟ,p]qQfRXRA9< ؛dT:w#Njm1s~u۪WvΖί{Kg c?-h1sJqIWӛ.0e:",<*b^rrUOp+?_ "j707 f{&gf~35?*_YYǼ~}ZWN/hѺU:}#0M),pz֕qbVvW\@Z!hZl5m d]FJGvlbq 4 g;˼TݐTr<@8"PX(JY]Y H:PN`bFm^[B1$r2EZ AZ]HdcХ B,*-Ǿ8_?ɫUX5w)nw*VJIEEvIh(ETk1הY\h@Pozv ŕ[Z,8BuBG ڣF[A>#u !f(;YPEy##d0T\G+W'MYe т*%7z{["Co H@hPuё Bhl!,4Fޕƕ"9BV/pdܗyc*ɖZZ<ݲTHc^,7,#N/&hR4K''#R1hmrڥRr\1:o$ 2{8Tt1bdߊ!`2xCd 8%Ctu~OsH6ZjҺZlZgL p⑑Ö0_p1x JgZSZav.lIy-lgϪڐ]"q(՗Lx<Ͼ纆|ϿB^{:ħ#[dד e~pW^^-p;x4,Cʟ $Ne,BJRkB "mwƆҨ75yrȶ佼I;{Gobf1#X߻qlz/˫ۂ|C>n>c^/ۼA>_/O:ޱJg*r\bQok=zupsܧ afX|;>܂':o-0\}xx:$Z~?n_v%7ӣ1i~#r'qU7ݑ9CB1뉄 LXOVs%8O߈#=Kt1$ 70eC38/'XeAnO˺c!1Do7\K=~t|65rκ=ϻ Xna>w|~Yo=;?upEʷ6 >aC^w+|%y|Q;ݭ`[ZczWoڨdCu-:HFI!oCc'0w?7@Ї2\~#b9m C`.jHCfеhֲMۈ#qpq$#2Sg(hlF8"v* gi CZulԩoLdK i2QtHX#2AYB͡|yē)duGcьy {-Fm.{P%VCV3 O/DyGCv{c! 
#irEMK`k)\u2M auՇ:ם6;h{D/+n4k1bHN}+ !%5z

OD-^-`;z7 h\|u쇝['[T>)6,&b5d#e~.Ъnw=}yQջuNޯ4?D H$-g%i UZR} 1%{ ៼2\%?@'XRqX cD%Y'\V8ze02&m7 0GX'kkC6P[)RD+l\z5SvVuhn߃Վ,,햚R6?WJ[`?kμ @,;Oy;pO1@4z /I EbwOyKvpr;OVuPm}r%ԯ>UvZ,cC,#فyߏ~]!z"U6,`O0B;}N_hmYgznnQ[rC;&hW`2ȴϖye3$s1/UC'*'$&Y(&z-!y;:(J{uW$E_e% M8Y&νu\%(T[NdSkh*1%^nd N/=Pr"l4r_3# tBE"=#1˝/eΫ|x4"\T̚3]|MM#*GmURSHnzB‚2* 1)'ss( FIThy;ɋL2q<<";Ί~K]+h^lXTR' t_Od/B )EhN#^SQߩ[v fI9ATk\Zn^`.kMJ6N;`h.Q6F]X.e-VY{֤xjn8lE9F VhZSuVCJIR#qUg  Lv>!Wc6J(&DM *Kdz(*`$q&!m`B?U4I"ː^8X(Ιjfi4AL'XEkFEjSmզ%d-yJs8%mC[}6?š~9559@3r/J,륨*hG\/>aiV}ogpe~M3fK'Zl;gKW[nKg뗜htBJBn1d M.rAtYw:W JyDKQ@䍉"p<$҂94fR@z]3ק:m`/Q3#a\"hְ2t?M& i G?k&x|hW-Ys_M6Q#(}\iq΍vkvS{,ˍhxr _YTNr|awʼnʞc]874&{+78wk< k5ct[_wRd?|S7lgjI9aדQj8rs,Q`i(Kӥ@̤JPXjPtHDh%v֝5c!nWNۖ䦣jTtSDk|IM> Z9ZpIcp!["QHZe K*4B_\!ՉWZic0FIEΉm HhM^>cm}SD X"͞¥saA@6"Yiw9)z8"8isCѪazLd-{Js*Pg{ ޵8v#׿"̧5~ 0v H5$;0XJA{WwGjmY{{HbՉIrǧgfbWoΞG-2̰DLp8s0 7NJpw86u ;3PP/ewq'4fy+ ~g"rgj X4}1bn֎qZ :P(*cj4_h"V$y'"1$2>ڤk 03pqb/U{GH1RME&C@Rp2z)J%] "3{/&Vȑ)Q&/}0).Zg"c RvD]ABRGyZzj u T1(PIR5(!̮Ԥ|0 N}5+}Ձ 2k cL,l@Bd1 Q%k]I:xtXJTj<&2=*qJ ,g%A{!V̌A h<ׄ!>dN2 r!lD%QZ&Lj"}q-B̔ C}ՋO;]oilRWA[Uc](H &TȀBL t ! `L8f+é!$[LS&1`2נ( 6Sz{Okt2loS*Ѩy rLkQ=?SVe딜&lQiZ~;m۹!t}saqj{gís{jWm:]ԫ4MM}s3pV=żjf<%ZNnQs't+j OS1n7nth[u'"->jU-]UO&-2~e +-5=FŲ??vm鶴]V웅;663.?z{]l#~ y5xot=x8/g4טI%K0"L6[be A6=e >ВJhzՇRgMv[%AI%0{2c$i]@Ok#d$ ֵK7ˎŇ>#>ގSGkJb %T/<a$xl@V;f>cJwnn[+\bbTWV" DGNyh0`Ac[};mSS{egX6MkEbNpևcdRh6DZ(2&bxΡ:zOr%1W":[tJNdcJƚ! 7VstGfm^5PPV7ީ˃/oES*K P.V [c(&%*hUSǾl#=2.hIQq hY5QbMfQCвu-臃Խ5q:Y+1!R ⵅmDtH%2b-Q EhI#^Ϯ? w z h;"h:iE~M-ڳt "ӏ~C ũj,,* rԭN Mѝ1QF=f[$IUslnw&4J@_kcĈ 3r)TTQYT0LN3gRB+*m^vaijdˍsZ}RD]oϘtw]GhY~e)}jΜ~ՊF5Aַv| -_-`pkwwoc^}7\ҢbewfW!Ac{y|{;ybWc9jj}(dCJ褲xO$ClJvOvɷƽH`oVܤi^4f{k̍ xR<&- wUk/S(tZɄ*d7g9,%)pSvǬS>-ZQ !bJbk;]k7z{5!$8-:*f PYJcXˡHó1g>W"6KRoKO_d%I<4xyTMs.Tu$i s+-C W/ϘWU7L<)[ RW SmBB: u& "c+`$/.6 ?6̇x ?Ŕ?<D:=|cQ$))TVEO*V>֒ IUM+OŘ/(Izi&p)09 mJP Tyyۚ!33cRVơ%_u3%w-:X1_5NYO 0o@!ϔuV!XM-JzpbԁxDy?g%:= eZ )՛YdW4SVVl^㱃! չ}c96KL1K@:H! !dkH[tXLi,{pc5fQqqިk[]Lǰ:~uՌymy@6/Nai_pzz-9js`6i1{9ߎ:ju1u\wm|/NxO?r;u+v|(',U|y?d<%]^v@~1ǎ7). ?}Vtyo4>YcʧG |\k( n_b>X/߽{s`PJPQdRzsS,Z_>{-ͦy%cyє>1gFMK驴iYT$\&-,=IQBڛGJ:\W0߶V66ڮ=+ O(TvP̊XC4äj}ֈ}vl#Kt\GJUBJd1̟Ƽ@k%*Ie'vbGpsG?F3[5؊2YZ8ׯU>z?鋱Y7u,_uGfgOͦ(aMD;D gkx,8e 'ǷÓ ~e&bi:a43 ˆxҶO*mUëvwFp Yh(>;#y+ [QV$'Y|lap*I -Zo%s38qdaG.sq{i9%l#D  +J8WMZ<0mG3_ps%m'w̐|g0ܶmb3.toـLp9`wE)8RPNI)]v4T:n!O5& "[Lm)zS %Vlchka[Chkmm;t`I$H䂇mdyFsƓYdB@*NG$c&D]*-7FЂM $6yRp%ZbVS]v%Ւ% c}4}E7"Ip ˘L;^j~Z-Xj O!Th0P*#h%#2ZgVB;Hl5m֘<3P3r`D#$s:^Lv;[L^RۘL?^jH*]xs l3@r Qf$B-F"!I7gݺ&' najnyyPEI,b<$xOdZ" [y- ,FotIq 7CW:7 ؋`&\גٿ,AA![ ئ5znhYsNE/Tѓ6!FO 'Y~`"e,6sL}JATS1\e~yǤCuIJ+Ei IJ3)㈃X 4GO$n-lvh~&vp$A.r Wl7|_uw2]?4/Zkz$r52ʹf):<=rֺ3:G y =䳶%nttgnvv=Q=`fy,|EHv5Sn|}5;=@oaE|~x:As)4FEV EB0׀srZuUJO|h~YovOU}6%ϕ:/i ,@,s!m)Zlbb.}h}P ފvubhJ`}l9VĽ3`NQ}֬qhgld:񢳑ߓvcoe/ϒ{#4^ aokqa&'#U @ʠ.2)*E-[ZE\sc3" u<R T)v\ Y!G8-pӽxR鯕sߧ?W`w%yS *bF@d ݵZVQ]q3WGX@g\+x;)>BH |Nj!<-ܸHiur7T wNFg2U+)d(#@@1)mSհky'f()1|:de_P(Jf0EwzҠK]JGSX'?ZinHmٵ]vsxB 1:eہNɈI]u;׭ISo]RtAgcŠ{cwo_r]̰yNuOo% vv#<#2!>T€F:VSi^yLa$c>g%MFޏy(uE/e2X?-`&Ah NjmXo,RSWG;^԰uA`JorlA`vB>z-'K' ~H t7R64o Ϗ%ҷxv-J!zIн,i(h;mw{77o7%z7#TJ\Mο*Q[ši-G Y+:i:CZEST1A?:fyA,yn}( PME;blszvZR o"`rgsst"S~hzK>mEaERDVnJL07C0 tH/3X#傦Uh*T$K ͍#esqL]W2DސKQ zEĪ"hL^G9?;4VXl`΅dA6&?ZTGL4>@{Ժn!N b IF$[EFlj962IYC~ix jsIZ^- /IFr%1!Ƽg$MKv+.:`i*ET; #u]tmV7Êb-%YR&g ꊹmH]%ZlnHًI'h\F$9v_JqpzB[sy"!u/Q!0ަTn4+1e[kcOPk:g`aoCk݌!e̵&ҮD6⪂'&M#h e+eDֻ -cgF|$\[dy^Wd`c IElZ6De(ca;>٠D"UKAF/VZZ1p(-  # pV',}⣉yT(:getAeq4H٭G؞8mI裣F?^mԠzQ:QUw|uti'ؼ'lj=%/? 
BN fpP&AyY  gJX6lCuiPq݊٤PY#T#j$ !T*بUDꪩS?U)wJ[|z{n4y:?TH+ǭU^^(O@c}vxJi~v잱}VVv?V</A[{tAn#`i#W{8b OfMA%'e쯷93y'09uqPAM:@ JUB4¦ZDdo@Tr*{Vy-޼?o^͟4PيrRr ]ZYώ-: 4UWQZS .R-?_Fr.UɠmK!+hTI9ب1$؟9s=y7rho0d0]pR`> I" l*aCdHכopmzpάso/ X}$.=Z-vk^o<fs|2S'3YH UOJXvE`E T#5buaEXh /:骫b)B*A%]fBǨK9Y}_ko[Y#oǗkd]GljzV;(w:'īz0ZE%0"Zk$DL{cݭ klim"hF܉]ȓٽK)₍R) JI F18UI` _cR!htrAU )&cժB1^bҹ k"CR+ t֜ U>=o/^.֧͙_h4L/5Y?Ȏ/l:*`8\O'' ݚdrV Lw]$+ikLIdlJ*8L!lFЋͽvnՊmF4ŰX.ƣC>{cS,S)#YnVf0f=enwnz~}g}0a65XP[Úb ol36cxۊY#m1bgO|O,݄?)ӷ-.- EF/vh^͝%uv~[{`/6 ׄ-*VpUHŸցв챳Y yޯA_6!]k'qzsg\X{֬؏#W1ǥLϗ܁ߡFd%Ž`KFmKOB߁ʾ1? ǔYX.%jɹ&APVf4dcd/2;hs\\6rI+q}5<+m ?iwl$aLJHjX&6;BRETT46 F68Đ%PCtTd|1Q>>Meewk S;NRo÷J.{st=g36_w=2PL?*V69 (Oyr\H ?_,Gp_#މؓJ]YfV} WEJ"PqZIJާ&ǾV/ɿ^T] A XG s &Y|2rb;EMɼw/K=@rŦfvbвM[ƮF/GTO\']G7]䰮*bpNTV 5%Q A5+3w{h>h^3=853JE ɐ!3Ķe.{ٚCv|񤅙+ӣƞPKǝƄGmUu_`<%W;bCY:ea%(ZH:i c)FF=:|5;l2`$ϐu!b`)D[DP1K-FڞmjzdbMvioΦk*Aډ8ZM(N0%PTUGVKz>Dc7!=t^V2߯3=[-y;ɴe¡#G"Oi4t[dn[2]) 0 d'(luƑ@fRvx#+$-9Od9(hbU BbFT}!&6q4#L]a|hOP]T.~lIfH`ئ =|g"@PH'7[+m M/ۻLʙ~sߵ񽞔V㒇:)뤈qiEoE1(6[OU3{Y9ϛ?gRLՈqz}jg"b0g%_}jYf9`NdW۟&{?Pb~}x& !-jiz-A33UJRodwEL'c4:|!2ے@SF\n s⽈+J{8Ex"v׿>ܡp IB:d e"Vh%ƥ*"t wۊc=A1hwmب_࡟ ZMii%fu: @NFj JFB6)mKJZjk6RH&v5 &F .iLN :ΌYs:ȌRIȮ~Nb` RFX|&]v)ZieF1wS>3|!ݝU\ozx=yxN$/p>y*Xm "|;&_$ӎxi WUΠ@=Iᾅ :rodZBRdrXՠQ2 Kfӭ s+mݗ{01ikL]rݐ+X!HqÁ60xNR]&崬*0P$ciWԇIӉ'"vܳ (gXA>e ,ז 76" v )Hu@K \O@Oi0掀Zy=F20L(KA8j+a&8σ|ӡmz`Ma"_vgvPL*߈$n>\&wkmɭJgtsݤjJ`vm9Ca}XBe26Osybfު'iқo]0wo׶f'pu@f=^`qxy-\33#M^rf(nfǚOx~a8n\ξ{'jSǃ<5'fS'46=6b˦^N7lP';<[Fc tRXVv U:S\G+AŎ=( 0.bpq1 N1ЃJ2ML&Kَ8irO"}||$~$?R Z1xyʇQ鞮t9CɃ2BlG"%@AVM]fн"-V$}o'.] vrs*Q?ݥ)V-ɻC` VojN7>w Ѕ^w7"HبPnYgA[2* uACAL3Ԉ>vfHQ$A68#VxoMș%jkG)ꍎ&v&mX:}۽yI4Y?kKR5Z 6GExSM>تWRPp1ՊA\~}C8gKo e՚taqjt"{uX<;ٮԓ~genZEp9Ob@q]}VNܠ@"ecɌA.T{޽˻Y#:A(F@p.Z.g®$ C EQKSP=]vޟv^*3d k}߅۠n-)Yl,(F@0VRX%0#Πa]|5U "(BY]".ԴsRa%t +s  1PE-/..e%vH;f 1yLt^vp$CYxsU.! P:kg6啈@$ 6z x{)af}7g4P\@Ii<6X9{Nn(UC F5Qe-ek;=|p߇ؤEYHR %dLTH>]]LGW~ R;V%g 訲B)Q%dQCE[[l 5U!3&k3h۪mjyǹ឵䜑v7ގMuK4F[ELT%:9UbQFTZ:"jSQV9,d *NT+CUȐQVtO̻Ţ`8n9< >=# NM;^z9_8dIc=i1GGl瀝URتQ@_ { }$n[HY_qGgr/Z"L]k65B)Zc2 1֙xH]73+1`C>{:Ӣ.<u"&/A" b=[4||鼑|QJ`.b5Zڂ22P$S)焺$%'GCH6YKE?d2'"PB& ?^)PbLڪCgP)|1b;Ll_vztNl7ZWlVCAL% vN883f쳝3E_tn~kE/PO*}Lk&+bV*2h(\cɵQA=e(tjm3|rfsŊ02`p~7q6_RJ@$()iCrF1kLBb̯&<(jSYQbNcOAE%M5Rfܓ?,7`+y:0{bI4"iGRGZ1 14d (I9|1R99/-߬G\mWgJ޷qR3ٞLISut5YRrId}iY`9D %#-,8$ՉӓNݛǾkErmB޵DYIٴmIRXC4edI#_o;JAYhR-B eIe 9eE Td9(<:bnX=' 8`҉Rdo)l%R2ڲI#VJ;F\4n;N~͵G%Y4N>."GvgBɔ8֤vtTpUᯓܼ-π_I.߾gfQ ӓtUg6Toa'Ng6o0ٟkǠ*/o1>z r`'iNOfY>œ6Y)e;Hi82>'S Gsi{<;,mQˎ9^姦FO˯s_sX!]s(voY 3Xt7phcj߲))tOdS:NG>N ]5`wDǨ0dțGyi; FѸwoMGxЋZxZvQ_ GVltj2G~ct.}eYڱ7/egmQ6U=7*&$P>z+%Z&7RI[52JlpL0T&,:tpEo h~GaêRwIB2Xɲq %PPXP&PtY |ErKm+wqU!AqXP%j{˃>^8!"G-@z{ڧ24beùx -#[IQK1 @ce_ofϨ$"dBЕC{H$XY|gV/k)JɌSfFf:^kۜog'i$O;M,E{3 Wl- {>XUvVy;6Oy?/uqbgF{~ufX)?O'UZ&E4.>8pv:*ߐ%9_u+_>rkwN>oGsUlksa/~}˪KnAXaE_S"Uʬ>sK~}@)Ŝ2x><=7n8wdG 43'瓝ituis4RيdEdBσdzxrD}r$'uc*b"kFg]uz~L$w=_k]\N.'mT8 !B "\拯]*۸F{m/U.+eUA`dF%#Ib? >h`Υ4v|x!(ҔV$W:Jھ("e3^0~sDXŊKM6L=m[BV$l$-svtn2~NȎc\< w FoЛ!et (xHis}pq0? b]']FR0#vR!)+ %%bAձ0d fdo}KYBO#Q+2Ɨ1hp]ֵe,'.R9J/z8n#Y䧇91w uF}Z!sJ#r[*: ']c\1ܠ˕h7i'Z'GnQUe v8D\~jHCYd$x|H_ 7+WQ]dY9Ṓ/S%q"?n߿ w?"6@#DKoK:ٞy:=?RIz(Uȉם7q~)yr;`( ެ Λ j2LS Eזb {fpZ wg@(zO~QmxG2ĆQ)՛p(tpLr|vh(E"Wo`= Jܢ,-O5#B;62^V {\-Zj+1\U=^ N ݁Wb.*ԛvQTq+nXs% b`{"D85n&#Y6w9 s[)R2fl !\+![ s",$2]$1`%j >ht `Nn})boߪƟ]kϿux>[Rv$-Y^NI\s]9gYJ!'rJorJfK$` ё_-l%7ɓ: Lz\VR䯋K,WJW]+:TEDĀ zT%쮟<٧LzA R{IQfbV0G(tQ|~iW/U&?)ǞKb ,IkÌMe)1#}0xӟ\h6Yڱw&i"MhˑRO%j =i%>x % וF ]mN(DIVY et,*c&;R{\%Ijg2f[r2hLwNd .eKbWwf` _Lc<~7452Dc<45v\B.я_JkyS'$? 
Si,jGzzS>B,&Y$!d/׍gۮF~:uq廛(߿T2Cߟb @8F.E'ֽo \.w\/Ag@+ |R8 N荼أD9DQ ӹu6!cV3neQ]9Sv۰ZZ@QvNr%CoUOTKesxd}G2*X m)Vu*d#\0e6XbNqvBNZџ]d%["# JBl[ft3[: A5jd;"!rVCrj3;#ku uj_i6R.3d[Ǫq0Y5g>* Z0'lu~G[$"X[y)x}]Ϫly:d}AɣM;p]ĸ0pK Kmq2 &NG/r ёxcR-,7&O[nN= .cwv)2zrr"F)KrTU'i,v~& $Sۨa ̩ Σ\)V[<:jTU眃uZ6HVnm q3|>UzZ#ހ8h.$株bxso*ƿ{udW{p C:+tIp^>H4UmpFmPmkW)W'0KEƚެd*N30[AO5Ȓkq >!66NUB!mc0K50'.vܲ`=ȇU n:OԫweWT&Jp޼dɪXs<{k~k˩3Pu`K$KO-Kc8܎QUʛ F3'>;1ȠErvXpw}$ a/ZC3NpPoW8stn XNzhjF.ti'U<W M)d%,=Cɀ_ZK-# ?x#ܨd^f._&OxB]=>YK X rC 36^5vuAN]|l$Br$>/z"|ZUb? _8&uF7|^/U'~Z^b&K\LjROEՔM3QYư`+#,X,pb%zgpT⼝ |*n&y G 5 PLcQE`x?u|mc#e6E [C'`wqbe̺qs:\ܢ5 4 l:Ux `5lhLzNѵڭSxB z0Jn f*Q9f&1묛\g&;'>u<ыMO Dft rFK&b|-mỵD&qDi? yΆ~?]?q‘-6٠MtՉMLJ 8w^&D8F5Ҝij!Xi w0txyO)v^zoLۿD ^fdpzL珤E]ՈLW JNIz^$XCcn>~yv74&tݤ- ɉE;`9@m7]89}A c ݙ2G8,jk$wbYhdj3E8/{:i* MuafY*ݞ \GJE]~ 4@CupŤgh=GzGe #ϔL[s+acj.&ˣ`}nj%Qe@؛*>F7_1EP1@}>x\r 2 ifj:ubPq4j]a2d.>KFg!cu+m#I0.0iy ۳h`ixx0RD"%^HRbJ,&IF°-U/2#)uN+% $(a8 è> N_BuK8:Fg*$9WfwA-󳓳oCszR.4حve{ik##,KꨠJ6]AҾ6miw-V'uwp|S{|#QT ĊId2H+zrU{" ploǮ5\',j+#>Z^+W^ZJId~΀Ul;Snj5ǃ-5K[9jj(֠Gnz& xG %EĕZUrHo؍hAeiIcY& 8Glj_Cl c43vm]v[Ձ}@{w@ ߣ _Og_{̯'T ^yިyjf ջg'R5ZI]$cQDb#d̉ ޏk]I rv2"Id,11l:S(px$)v=be>~ i0JL\#6VLZ誉sМѧ\D ,"aׅb^5mspIF隷M@9!i5>^( L x|Y%fÓiWJIW >Zju"L42G% 'Μh Em[ isC"0GjDrGM`kiX+jy(Fx@@mogKrDv%Fw;Ÿc: 45cZ=kwF"Gby#&fCgT-!CeVF#dUv,\WO]Me+K%G $BJU*Ի쵳M&gng*@=f39YX[AفK𽶻w0_6,gT*jh u+ KegM`2Dlgq"9c@ ~ЮceKgׯ7$*trH291TDE7ɬn`){Eҵt(X@1dƫtle I$t.1ĩ, QᜄDP\ze)̓Jь$0L}qML+Li2{wd?{3ן˙>]\gk;6O/v:Lq&_=/zV\Bj=L<^LJBin!vdZ=,Wk'a,ͮ_0z۬{1uASeubc B)C$F3MiF\L@|i@Hٯtt\]%*hU%Νٗ i&(xLZhWPXj;tt?LsI'(k9Mo&j6Z\򖣔$J 2f$ BYsPqX\`R)iC0^%95٥ ,o(jkL 6:7,3=%w'C\OUHWx ĞE=<@tDޫH./?:Ӕ) ZՌlΞ^qzk%ZSUڝ/׮דJkV엮tNږ"|<35y BYke4741NC6 J/3^>@͒.k0\tNkn$tHQB`JrĕUi^ `(/Ifjo=XVp^45S 2HipLtuop*RV^zear~E/T"\^HY,M?l)5iΑPC2гVUԦ6u%̮>Zl6DJ=NllO \6Ӗx Yyi[ou&MlR r!bpD3# itt蘫ܰd.xp  N:c, cL4]malz\Tu˒CV855PLK|TOŠetI$oLTD-/53,rd]ӧ6>.l%4/<iDY 4IJ1肴9,tvK u 8yD&NN0HBO+sڱ(y2 hzwtTfuYUG!vEB25/>H:wE;Dxō؇5(#jw˥;Е{, k/zMUYs# iSgbf4~ܸەnKH#ƫK<@pw-}~s%O":>7ǤS,/X'tY\ğ4"Ւ:>{<#{wGԩQxkl,vm$9DV$Zq I*xKSʖ<& zg1Xɑ oeu.}*eZnar.ƆZI #Xj[)VO?kʃe:.ô:`fv J^m"`h貢%JnvWpT p噲% ^)?ixw>~ߘï#WqKG"/PbzM8IɃ֊r(._Wxx˸(}~EZ>Iڇ}9]<~ W]2᪓ n;S,O t榳nIГ$RJ5s;+"{Űge~Y(+_>uXrfW1ͧ)Ξ뇓_aru^˧$nNx4ʞ oO_|uQPo͸zŬWJٖ7Yiq.LxpoM]NgR-< y{ߚ#~iOؽ{wG >6&!\]^⹻ޝs2w~^#6@l3~d2ޝ+wt7*#{ر ^jGFFin4S<}NӗSfi6F-sIdzݟXK$ï IV|KB!pq<l1*[t\MB \LZ#3BK-xy`gÐ=rLͫ$ưVF^j?' ѲuNh4cCt%kVij+7gr`y1l~̙l6)e(K-JCiF1- :/85\9A`݇˫}LR͠bS2)F)3";]ko#Gv+|JӪǭ#dcd/F=wkHYN{IJMIÞPl!ݮu9un׬lQEεr5ڦVJ5{ˇEam5X)YJO^{[, $觖@`GI3s@i,_#vxo?{/m/;4b{w}2>ʒ| T(;F,>d۪"&mciYU/PZ1`]PC@R{Eo Q>rX#z|cՋ׸x~Bݖyk8;\wtP8#H9^Čl~̯Z;)o#$["axUwįrpX4WRdy5'-k=jJףI|qR&wQUy&ewKT<мLd@ni˿@ɛ77Fh2ɜȺ4kgGk)B1(sBؤLΪjmpc]BRRke*E~6ė#cH?w}6/ptTB0iךxr6 eokV*(d%%>M/TktQ|VNPfKU J`d!+eO2_ald7xI~Akڄ$AQmD_L%HҪN*=z !eA3 10)[V^ jq5ŌƋHm:Dq@GtiDpQ$ʓBE^HaSo酊+\KE RP2ZݼiVȐ ^#T#'SZژTiI)sV)%wB=9{Jv:b5S HHRh7Bjb$IMLN@Y_땵D͢6i q8SQD !4Y!i b!d `oHgitdmo?S FPJvڻshyT| J-j{ 6%Q2\MM, ;DItHij!2^2IId͍,TlVw W=)BzoP%+I5Y)(iVA imCvKRj+@J@lTZgFM(&X42kdh:@cnoWgٰ1ԛr[4J8oQ"aZk*,@hh > P@-(Z,D1AV@ [Q(ՂJ&󈑿 =,0iI:8R0ԐS+yEqhT;QE(X ;<N[my/f| g8[=p-n'FdM Q@dtd"u0OJj. 
gtB!]v#_V!(R'¦B¾'X-Дb2ľhUd2Ԧl#&H.묨TRXZةSTkLurwմ$Su& V%T`9$#+lKVj/Bb?:'X$M嶣eutX>9@u P CzEjD#~&%ֈeJG)~:G}Sžq*d 2 vZ4S´'hR kKJ /+~ t4qR  Jbݐ0e^B)*0>w>t:w"rOLV VN=mXީ($X}@@&*J`%l jq_;vCb'*%6醄"I/2 hPOX0%r!7_Q*R$Lǃ5qZW00JJ-@@i me&!@MAq`I/Ĥ:- Z0ÞDErb6 ݗ-QW{rZ1FMZ ق1hڨ 5يw͑ RϐP.jG0Va2´hB}4Bi},5b*"hkVmW\ Bƃ(.`ϓޚW7E+\P9D]j&)J` gxN`m(| [⭵wFś-lf7[oxś-lf7[oxś-lf7[oxś-lf7[oxś-lf7[oxś-lf7[ox?1YM@q<opQh?v6L`&obomޘo_wen>s}9S/ᗷ~w#IL`H0 wBl~:^t )n"%KH=!NGn,8g B/f :=fS ZsQ@ԃFu*+[-hmMTյaӶ@w͟9G?}X}8ؗyqǓ?oshc7WWy 34/_pv9} 9zsiKAJCiI~3@,nyPc$6x'w^ Yˁ,]p? d0PNYh:^/9L-"[.6F|ʽj&0ug( e5`pߠKRo;*e'\_@ySYg/+J ֯^6oX"pcy0tgI?/]6uj*S?7J~00=uz#ށmRx sQW}#FC {=e|Steή O폰ݺ@] n<.{_H^ܜ;R=Z1=ȿzG|yrm PW6S?Gl=]v,{\`(Ov0d~=KWW6ǜw<>pRY>p}g8>p}g8>p}g8>p}g8>p}g8>p}g8<Ɔ Ac?~ޏo+7 +;osjzN`U&V޵6#׿"7i$Y Y,ƀb;ɟOJl^(\ݶ{8d*Q来5$dqIZY0 Y!KK,ܥ| Zpa5W}ml-zvE#aLIH0mY8C綠Rznt7ݬ9 Kϒ{BG&qvBhy۔cpm;hlESUsٱ?դ:f#.~ '2IJ)!#3BPP5{yb8nY4~zZ(oyvȣt.wp€l-gBv-Ms{%AO}us4i.*v4J!C[SKLь0'Ch?݌%1Pg=EفlX:Dpu«22R6223]u,\Ӹ}r7 (&s4K)Hb̓wA(>)M? D#TG$MK9jOR&pPȡ:%iEeLhW̆^͚kqz7'_30=ZU;a|ga䩦WLmszxs;d /?7>CN % S.E/LA{εlřD3zkg"]jduis8r(:f@cJ9NAuoGnfٺ60hn#KAt64V&5WD- Q7RDtv䳴)Zd+jf҄n N:5h"qLId+;Y(J%ρNˬTKR _mFLQ+,G1*va-/Z^|k>k 5gbF>K`-0G}4Lpg:5\Mk]R==3:vNz){my1K=>V=KN,_K,hS,PST)}👽ٚy Vij V~={a]“W 4p# kcoQré--rK.S C2H^H.IL9T7 szrճ9[Rë?Z%͖fzKV5Hr]tj^bFS+V1^8aD|[i̬p4"YWǑH7&1gINހsz>>{KfC߆bd|N-Vʙ[[Ți9΂Q-vn>O~f]ƌ3:VQ&B.1Hm|,e_*F=)9 Rʎ@aZd)̮wzѮ[ҞWʹiRtpMo? U?:H5`6H]>LsZ"! ŁņٜYqj_zƞI:AcZGQ+,tpEsΓqIJp]퀻BmNw$k^Ãz{ kkۚgP8Q6 é1hV?Ĥc(I`OO g #١@h%%D^mpd R[J.wmY%!}qSX+a&+LO5D5jTcD#7&;FgD/~-lp5ɂ)МZM" pA朊 \S-iQ%8AFSVVB0rߏudɴ?wZs3Ť׎b-*`%n cNy#kU꽶OAj 5qCihDLʼn⚉:Q/u=GD @ HZ>j{v%=3p}>/ʷ()Aí9-Sr2*`Rz/)yR;(^NYqik,d.^)8r^U)8'|R^sIv/Z9+$@$UWk4M q4j|,6&RM?} !C_o- ):ELL9'0̞{LoH9jHsO%ͺs 4C(M$t0ȳl okq!lA@?ܕf,E鑍y7;Z|0|qS?ǂIԄ- V?zf?_Ac@LY2D"A޼a|򭋧7+{_x}rB~5K %.t0LaPgmig=noe7ٍrqkpAJW^PD@"D*K\HB9E'r\SU?Dp6ZDil4>(BʎZj!DycUYsWmEHɗ~b}/j{a{ akrO|5E![BPЫi#k16} DȮyB7dh9YWq-2jN2z'N6(+r:QpS 9lr,W)7%YL&`se'V7,^zm/ʧDۙRN|RVjWj} Ь9[)6wC f# $M#:WI[ D *8mΥ`b15RI%eETVZ'l!,*,L{4EcPИWcYs^%V +=x٠+S҇6'9" R4P6hEIFIxb OhJ(YD0%jz4$xm%b&& h&瀽l"#uJ.IcwҎfyMxlj;ltيn?#8~ d0pN 53tOw81ipUWzM6^+uQBo—/p795|0}+CߝNN4 ao׏>Ő~afu~LVLwKۏݐfr­r'Sa`S7y_b}@(?ǭҫ|cSyy.2P]Lm๒Q KL'Xf%ҽlj[xĦ6Fx"t9U*"i5)6IEgW#f瑚ڔ5!EA@B /BFG>):J א 1#Q Xڜ|.ydv8 d@:A% S Ϋb7*iUu2~#E^nqX[T+sf M>@Z&YC+.b &]WY-- cƒm58:\4҆Ɖ&\H1qN29*y:Zz̵ke)*RDb1YBӖ<50FI) @qE693S-(+Lg/>H73(iqѯcL`~7uaj{*?qmG_o OJ  oVi jx?Bnn9',A8ҷ!Pg4"tLY%%(\`UrVqoToq;`2cCDݎ Fȝh#G4kP4Ojф%K:`5DPTi-},m[i[ ?W)Ce yidhͶ0RLJIJQEmBM#7&?W۴)HNvCYe;w_!ⶓ:UvsbOl/,8^_&VmcDIn}_ׁ$'RN'ʻe4w]~oړ{7yuxͬ'o<-38hg2b~qRbQw~^k<K?ͷlChOo7彘u2'YjnBKR469^&glG-+#2O'|3i-b4|O[>s+'9?c68^OsӧJ ?]_zR~F'vԻB[r!W׮VH0:$V)"?x,Y¶.J~++~^ qlk.ͯj&%@b/Uu|SܬMyN)i-9~|8zvѻGD|gηRBc5E❥Րˠ] I!IX>:<%QuH<\KJ >+Mk]\Nx&ݭ,bM&dK }by096*ΖP1haܑ=7fS2JɧcYHxʧ|,T8 =H9(ιTeUN,,gmTMSKXd6#5YT !m |[Ȳm.LA0]*x W dzxW@T؛O.Prσ%Z -D| rzѕ躆1VSyxk>yO̺& ugrw' `c^Alo"8=Ps7x?}lBıIU=]agrDb2k,{_)&6dY|09m;,zBVOW#bu;t6C^Q}Ff7O٥C"١sP٧IE[JEqfIqm geSDcҹh;ݻFa]ˮZg[ݓ]eV1G_nYhyma1%[iZCMZDU?O*^jz;iak r?~_ZCLja_Ngכݓ뫓1zq6[6[oWf.VW+Ս4_bu~q~ӳ4_go={JH1,B&BJ B%ԱUQAue~RV˪1TE0ELYA= zD{xk4\eg#(hJ5^IL]sA{|kl xM%)hZDbǙډmm\fl=1jy2'Bmy#ڏ@{5ڱɀc5e1'l1 ]VXI7݃vNBUR(H1&f\Ar13s1.#֝1qkջ eݭFkt̠eva?klidrڂ{b*/G'KFDG}>bUw-.($Bd& UD먕ѨT^ˆP^5L r`:zUI 9tiTl3 \>F4.=D*F,D(J#]襆8v -XX"FԬ%ԅ'<}p*]19 LPOD$anX NF?}Ӈm|sҋ*DE)&r p.t#=>zt{0VJkpb*&u-މd)c P~D};b7c2 PJhkQerQł,/p`DAҊ\%""H$+csqzj rkkbv(jϓ"Z#|Ўy0&AG߷o9kx3J-+gt{(QdƌwiIO#MBFfGGEB@xӇ2 ۓطdի$bs8:0: cF9:V.6+Tn Y&-lXe5}p UE0""QJB5+MG5Hf=TiUxrz\"yw]f+>/>P:"EE4%JDc{7S~XMv&O 5DR2deCFY PFt z`V)l q,w[~ؽeJս ;ز=v.X: r&|7|JCiST`uE})VUY$z$D=e++٬→e g=  dbY,i;)3bHΓR'MX=FiZC-TvJ(`S5V 
*eCYyJA;]?ܳR|]*Ӿ5@WuYaYX-5LjS^#b)נlt9lEtDEf;{6M$XuI5QNɇhsy$g:`f22ed\$@BŚ9`,\1nD~A!dd=*Zɒ5ҕ , 5i[mFl)QQ~I2$/C2xJ(zYe\P"h KTAmsu'saA24 5sӱ8Xynf/Lږ$eEJhtMd2{Tx8 ιQ d /KF/+\j`&hTi K*'tɸ&UInҮ%'uV)?({]}Pgzʑ_{'!Y"`fg.ftK1vǷt߷xd;#ْM[!eSs䫊f{,|Ffٿ~o?.3 Ո/QO4o2`?5ݐ6X1 ~'[dqW< l9;>9i+G"A\8/bFgW~i&Įo鷳:?>}?;OoۏHO뇷d U1mErWN{%>"ny[?ǵ̡UY_/10uUo։*}ҙ?{s||GY_j]aT-eym |+F,`bA ZV&(D u/̇o? !Ua\Zd M$N9)-YmHhĉºPX4~N䑤RnjztZhhɿk_~ZrS쏹*_)+eKY)iY)\uxS^G-ĕod )kD"5TZg'X# ΀䊢uS#sdofƥ+J vL.V+ eleh(zd>><<|#ewZmڑp9O@ `qoFCIf/L96l+ Ee`6c-: `0E*=3a-a C-ۚ=vmįqmvѩʎ4Xjg(NVb&MJ52fc=wOVdce]5A-Aa~_]~w#2)[}8[cJ#J:-}9G+wֿVS6Ŕl#$!fk悘J2T ^\)ǘ5:UN-߲4ftI8?y4PȹP"26h}&g\URYu!?QQ*/0KP@@kr:OuxnV/(=vI& HՑͩ: Vul6v΄ "Vk.`$A0V@J{T h3iCduFD h0n,ѥ\8>7?.QLDkƂp} +u^̿<ʼ%@ߎd>~ ֱEC\ǣ)2nv7覂K7ikU-ߞ|6GoTA^":٬NX9(<{zftb~X\칫eճX'swlysäz`]Q-r^dz?KP0N>N8fƣN!رlˊ^/ :_._}1Zxg4d_qZl|Cw*v"&t=E}N>oѻO|D^;#O\¢epzXإnOֈnˮe_1Sڙ|{[K}{|K@ nj`H2ŔN8S%*-?MF|֘ |WC)t-y_bJX!\ 9kgj$mTSYtB߂!"ƣ&.fWS,@ԻSnkńԵ5ډ@+,0qNN"κbVkP9y[XE'XSK剂QЋk]T9,Uu:$dg>(;s ƯZ_$-p^B z/CD|=ߞ]QwmA vW77 bD'g5 dl!R~`jLSW&|+";[Ne,tۅ)aJxxA fMFbK9k$gS"K!Z2,xD?#Imd{})6!ׄInnFm=Aȼ':]ZʕɉHyg0T}lC(Q 恢vdeg'Ja2{= &< "!"*d"ȊuT̾uh Rdd&R{$-k _刈Cʡ뎇sqY;>5̥* 0 9TR<$ 2O&ښAdidFrP*Qʯ3Ԕh˨ -f&ӥ޵ٛ O7Ql ڹW uIpr ;%_~xj}pRCZ6ԛtO]t@>hhlS$ 9Jb̮b vf뱠隄d-/giymo9AD][v>bAB 7 p̬(aE=5AJ~g]|v⮺ƀ%%&KC>`Cε ַ#ġX,^g6=NvߤW#j"GC.dt0@Z9Ȕ&#M ̾➿{4==]+w ˦ٵ c[r&01T?`y 2 Ԙ....~ /W2p\^$a|?=ϏZBnxY 6B0P Bʹ![J8񕊹"~#?[7v^Z{AXOAh1&E멂|`X!ѮU˶u૫Cs㐡("1T9aL>BVuc8*ǹz.Şh ,uoeI->u㙝uM/)V,^rr9mJpVHt 6"FrFm%*qFAd8S ;\bhDJV`Eه[_t[1O<n,Rx}<7BgFITAx ""3 R$'qA1;C,+jy\(~$8?M]SEw+U Nv֋tsKQ-~u'Qy񒬍nT z yNihZI΁R@˂:2!O!/3zQC9:s_$-Q[ G2Q@g92=GӚqd`Cka1x:\+d=Oy45>|j& B 8Cn dut~o$HW2w؏" XII}o^zӺNo~#l(>G{n& ,T勎ŶDT![2 +jʙiCEK%c^|4naadm]XeBESaP3ŋJ(dIVgE8"n'cxyz O*b|ﴹdyଂг[kξ#_#~m$soAKgAr aܡ7PWLJ:U'o=UG.V2 1Gڏ^?o]3-\\^6zg^,mߞ6SLvW_ P_5*KpRU@]%&*0L\Z0[<;Rek}*11C5Ys@Ϩ)VTk Rs(}֑"ȊC:[!)*5%L8et)§XrS]h843J=Wo1PJo(X֖>|*vgmV&ÖY l>iv' pxu>{JU9VWgRu|#]ؚ|>?jt}91=mc Iƛ^oEo&VGcNgHalb߄OWWvY?`򢀫fC[5]-r_R0]6qXR? @+;;7 fn:jy@˝zDnPs}+`wy}Ot5/O[)89)Cdskl(RFK=n핑qe0^ZC(ǃ]D"4/vhl͊G)G8#!Qpul4+; n;5tRJ8 RMKn$U K!,M;0VU 輔Jf);8A!1HHxlF BķrԝSXmtnZyy2e ]hyjB@͌qΏ޽`docTJtB]4)oVX6 ^s!a$* <qErSA qR{}Mi~z :Ld kVRA,ld)oᰄń^/=٥D_:h #&(5DKT<-YN],VIRj%>ABr ].h$/D:bH !sʡ_^|C9(X[|JWâZrҐ?;k8d.&ʅyֿן|Չ~BB̗VŐ>i[OПa9;@$& _L-尀߾&s5=8Ps@tq-]هwK:}./N j asq [3>,μ?_)3e{,zn;ޱ{Uq8 9-~:i_֑^s38̡; iNw7teQ8eufk \a%YUH: ml$ -fg#,߼xƿEܫ'G8L;;8w oس:Xc:ן?}<+|8;B 4}eng'9gji>_~#Wc0oP`AszƋ~_\)s/c&Kzvh\\Og ~ǟz:,py>T6L9&t@Ak>u/SM^y^f\{8kOK8r81A5'MGFiݣn ֵTLJ\N65H Bڕc9|Q.޳xD5^KXYK6)̣yyy2/ی!gHtHgwԵI{1.YpvCk]DKci34Z[Vd-.u5.u>knC6u>9%qeH&PKJVTr4||TK;73]88=>I8Li+at3B }MY4C(@1Qd'J|]7GZY-ٓrqKr('S9vTquFdKx$5䍈 &K''ӣ!tt!ٗjFEkP9xŔXX:ٛP+CR)*] Z Non5Fx#Rc5XKזͲ OX[| LVm k!Dֺ.W'0Vԩ4ŦSakU#'ckkӷL%`C ,F.Uk;5{o *ؼW\ |ZbM]4Ofl_Bsm*;-X,MKޠ`!1n]tp֠tfL $ c І.9"RJ/< s[eQhӵ=W==FhQj`j(1cDfBH$qrru/ kkBc^l fg,c_?v? ʱ'彣Vk^6usk1:=Fcˣ@Z&4֊ŷ-  ِ#)\Ӎ\G$4l vʑ>!ar!K˔-A?`RX;[NPy0|4Gg*܃W˘څ'1m2xO"zgNO{ O֛V[ o\Հ^o]0l7aƥs,ڣWbPQ9JWY#Q; ;׷MWE="8cΏ T"<&7QV/1?8)fqO>OS7ZahJQJ.Re(0b=ZT#is,VKrijfA7 S=ӥ WUj[b>JQ2ۍ3!yuZSz*q% UUr'F7ɓh%~::g{֡8 'Co8=דT#(u>{yp;0lS}>?s/bUOݳqǎ7$mVڂ+([*dTU#g7:zg~We='OP`"ad)ΧӐg0zkd<}S:.Vސhݱgh!["qΏ +9:tU|Pa2@-!Q]i%szA_rpur(˷.bHv%(0Ps/% `!l@h=]a@|+&g>O=A/@{?ANaHy..UET/ıy;Jc~Tb{Dz9' qyj3=`|Qh? g/fW `3L1 E> ;5aleNT&W;,$½B+ a~F2~^aq O !M;0-3i9fro\ۿȫ ^ |k&Y a`b`l"){kwfuC>'@Z׌P1 U9(!} L&) R`dgg"VuJ]96wb\YqׂA6n JA1ytx|kƎrv0'2ܧ}_|Lڴ"\!]!JkeRc/qULvK(MeCI7sgS/E^Tzgap. 
6m[R5E l( [yv;r!~,3øl"{UH$IG`9p6vXHwOU@V0~(M(V͑e.hR,SQ]utAt 0Qy2\5K;•Q=|jQ u(2i mۻkn+6g uQ;?\2̙*G/\AQ G2WUa7;Gm>;*6yrASQB?SCJٻ޶W<ҷ ,-@<ڙ<Ŀ>plSfH\40DNU׭{Hp ̉㫭Va=;S0[*E!$et*fHL)O.Nm\kǣ}8 N=5gL5qHsfW\ ۱Ol|!"^Zp1ýa6wb D~!*잃2De!*AhBT>;?)B WG0#V~[PH ΝzjH" |\epQ^jn)m;3Z &3,ꔷ݀~3=E֫& r4S\VI V訧U"<Фq{{4g=iߵ/LD;Yz\+lZJlB6hy݋ddZ"Js*U֞# roH<.!h` 2aعvVB|&%Gu˄i%w7Ndd; ,;%_?=OD=6&6׿''F/֨~w?_|3r&Ƈ_\@ktap@[6 d)fu_:ӄ>Ceʱ˕\ V8?ާk;ƾo@..pQY=yJ$x3ЬHvhlPc(A-A'] ֟Ow|"]P =aV)_}Ϗ4.E7H,yo?Kr޲dPwNpҬ2 Vԉ)UN9p\ҵ-< Y\N)йd!05RB׼Ywr4ǯ@?'@_mbYp˻w"CƵu¹Sy{=:RJO 1#J\7Ye||Q0{1{$x/nENfdڴ2e!lFz=*NCS밒S <=LM̫ /4v9jT>[˒c{ [cZRlh >N^MЕ5 N IS&XHy s\OCu1-]g F4^8j䣫TB F;skge/[e);Ss-C][Hz {=™(0T5#W\y80~?JEmmc'9$;[ط)4Q1a򜷓ȘWK]:'5 { G"dSnƮc2}>#Hl+܁*:jb7:/6xR{es<_;Pge|׿rP#~,kw~0$&xF%/%p'ZMRekFp")K5>^ -ǔ7!u-T3.d]4#FcyrQ%hbp9c=,zJ .~oўgm,=w2E('-k􊸘B|96Ϋow&uSnȒv马UX\4Ɣ6Z7WI9dv둥YDͻqU9.q:$&vL֠ZcO VYw ѯo3{:{'dxqeʄ: I蹆Ώ!~ L^S:1Pz]z.x~Nf1^ &Ɨ{ )/a,؞|Q&zQtjuOݓc xxu ;bx?^CT"{VhuOk M2qE]:Z \"t]/J]fjMumU AqUzlJ*9i$CϤ ɾ}$6hj;Q<ݹŐP{ 2q Ӄ[[3*H4T]gp@cj>EH erU~\IӞ429VFK)!{5"knXE4 rXjuDxFxFΘez([Suq=>׋ HJ "i=;Ҿ89$ckYJ3WUITYQG~2\sCeH@ǖ]~屒5qET4rժq4҃mǔaդ ut.w[R]ָި=SҁRS Kծ:u5#w[ɩdh$΍Nnt$V^nG|}yhE&Q;ɛֲUZMVNtr>vh)b Sy@j] Zb i!*잃dA ˫[59ދmU8ʩ-k9N lΦ+Q?_QwԘs,Crh;c&ʇɛˆ\#N_ 8p*m z-,<\[é=wNYkD *sc0t-뻉ך\ǝ\(\Ú\ߞ hL`/iJJ8Nr ̻i۫+3G^W'WH;5eޜ|w7Ef?D3ǘyB)eʸi-iPrW5qC*|W.yJ y.c<N +GQOk/𷧌i֤n^?||>Ù Po| ܼdS_?kI=bC J҃ܶljʅ P?YjuD1j3 /KqJts"V}ĭz/"~ ϐ,$3qj1Z_7k>nʼ_wX[G;*e00Ԭ8=Q8ѐK.JE;$~1Py'qDa/8Q5tUS5ve\[bgkkEua0B7C "u{Mŝ Ђ@ a, OĵsiEm]+|T*DURVyIANj<'aXTrh\6226v+gvA77"Ǘ n?&?n@GْGV/sM=e_h=8.Q 0WԽ-[CQ} JQP ]r!cԑİJīXɽ{oٍLUGK(iXvemS Md_%"ߋ$Q+=Y-cJr[udVB4Ejjm\Lpќi)~{=xt2/Y[CnGIφ`<[j 4#*Nsr໮ۨ9s/G)Kڦ}B4{3+d|1'ѧѴڹ,ʩdhT!΍מQQj,Szf ƎJ7O9W>sF[uJ H6Zb[zA[2E|>mZ)?;n7kVwJ, !5հT튨ڴvD{& R.-15 zڌ R(J QYoi)eY 4FwN R 5[}::E:G* : |mװQ`O_e`ʤ''y3.߾/UY39tUrS1WL4gƜzU$Jxy= >"gsl:OXA3Kov`ݔ;hWrsvq l]-ݜR5.h+/J*"x@r߶vItBn)+%C-`%O^}5x}%8vJo[!ER 租S;aqN _ :Zc3e>ڣb$rNC?΅^x.?lpe I*B*rR'׽M J+uF,On^}}I"=<4=<{AϻhAu]- N47ZOW`g` 0d`"t~MOK,U^R)04P@L菷`mbFCI!YH#]ؔ5oHahtw.SG!!5j~4c;dw{އ+:8/&H WcX9iP5g]O?f=rofחeULGy-3>ٹ7ƕڽ$+?A`;pN= _5?S Z׬i_^㿭fVnny;{ ۛp7[r? 
m5ssJ iGkǸpњowCkʎC֎AVZ$w-7~ʱv?⭠k0$}f?û o cBOV2{e]uFof?lhK(Gy0Җ 7Wx "T( uDJ<'7zȞal?GN7 _5{*7"dE d jn.; aF++i4)>mC%DWB19*R00W~E]XTY>-T-;nܚTJ0$%k^t/Uqv% /pj"N-^&d'#((*V/.0^uA; }Uͽtڵ*}`F\VbbZqr΅i] `TQrE" 2=9uyx}Na3.Kq5_ $ƤZgz44~mN{@w1FR**A"ߏ暺569֫ <)GnmVVk>}{BYE}08Kh9,~Xo%Ka+^ExS#`JyԈȧ}HMraha@zlI">/kB7)KU@"TC,+#IeC+!R ,Bkb ~/TÌV.䟿/ k0/V\:Ϭh "9ʠmbGZ > N];k4هǧuGY&1 ij.%…} KXg gh3fK+  xb*g6%{>K)ߧ6$Z\`Wb^ȳHn=hkE-]%:; DG^PG_5 Jd>X Oۘ(a  orJ|+J|򻧧C(H (˴YMJ +b*ߏµNpsxF$_$+4 V Gja#x+8ݧG O{A4}#d"e<{e7ujEuADJ]Vg&CnN]UVQ{x*/⥰v8߫u Qf$rLǾakaO;c}.^MJ kU>]!޵%fCbb)b!IʱÒckkbaZ*?jLXMG]8ρn~ Fo S05*˝M)E2cQC&%+w Ypc; Fk]xZB C\鶲hs#ww; 'dBe =[@Pʨ52SY&J+RLĕa2ǿQI' id1A'4j"e޲?j AdgW bȺ$\90"#)m~%Ӹ'g$tIH޼{wD* KYԋ$=2S9\ٮTRc$G;`m$\%VzR`yRex0`ᅠvt%WR#ꭑ:j`:#E8psR mz):N':w"C$55hi{Q:Th޽}54r.ԛ=N*a׻WT&_X|Ր5ZpFڸhfkںm :g"]z;&8ՐSгZ ĥEU9.5 ;ٻsBVKmlDж ۇr9 {Ljf㠢S}ǿʝ8˱蜤W 0uD%ZSU]##jVbe6WъK)kcqSO˧v8ޫ~h-Q覐#`GL+ [RY(3^)Tt>ksUXW@Ū|h&9Y@ c;΁SwS䃳ˊe@LBԜ ϬekIkdZd B }Z,\xO&%sYl@1۳PG'mMC-!vwJcs*9,BoN ~uCl=yM)ҥU&0Aּ{z伋.Bm}AH\lj?ޥ1ǫa<>ۅܮ `Ej6&pu֜z*[eĞ11"YIfZa\I*,3X48wU2oFDircL.>m݁u5 !D"܁k"PaJ-CX!m]\LiBY%S94Z.tǿwGܯz$fs=H'[1L,;@ϻ A _*wEus{ _7 dvT DO)cly-$6l DHp}wKcmKi@8 Q`L[t;1jE@̇3K~$*xi_H˥&[TU2 ţ}jC#FO$֨auXt3s fO%j2St1qkxm*ez@,W=՟UWVy!4OΐZe\rhbiY<W/w_̮~Y["|4vf=՛M8Y?_ [ykі---yw->Go{{ˣ{Ξ>—U|ul=m5c@n[ Xzw*sIJ^gݍsVPTWyiEV HT畦5Ӗcw7ݷjiAM( Ɲ5!Ʌ qU{`Q{9*YUu`DV R(7Y`m^ ԞZ"[eV-r\n!4x-⁴O?5I9H i~p1[͆bK_ Otc] | !v.Enw2J1yFx-#baf*WBtcGaEߢe'!~Ȏtn1QHDz="I="+FkHD9=ڈ>+E8JVSriu*:ez7CAxL$|zJS^麠:HYlZ{aХ\肣eڅh$o`*nP?e Jaڦ\LE2T"+r*\slz<ƶU1eaP,~鵽PG9n\E*v Sd(}8f!ʘEg LVo)Je`oC,o(A[,/(:ۅ`PX|Rx5XPJ&JtJs%e0v/۳:CWD}ܑuQiM 0f &t2xJQ5Q۴Ov9|wzF44f,0Uި=DZML2Fakn|5D CiEjH7*>tBWқڳeQv;dmw {;A\mϒT&L bg4,sQF•@?~j}e}-\p#4Zes:6Q6Ż;,);éfvwO*Z^r5?-AgJƑ_1}s{*5]$3$宺NA#ŧs*8b\6.erUO[huϣ:½u)!UR-Oh_E1r`X0.`TT{غ% yUiw7kTO/geQEwNuz@ר *<5}`5gժBŹ`-ͩ)-Pbsf1TL AdێFHSvڰP~EHs5.I9>-O7SV.lAFx@z% W]A|CtϦmKiZK'Aq;u;9J[X+uKk=!ج'=o ϋ?6&Z<%CDr+t, Ó-hu./jNe5+ޥj] z'vI!IP(Ж4X I;ZvPȬr!SVVyQWBN7T(qce Fs$$FJ㍼ }A)ιwjl,iv ?Aoj6. d[GϏ2GH׸Ҳ(l+qQxq*LE*];RZA2UKY&Rb,.4uĹ 7͙KݞUrÓ LrvHmn1+AW ˩dl;et|&|Ikxʌ$7@rUP %{4ֲ&?%Zs݂Wi37c-)/ź}OsB$G\XzzAJYtb4稡!츅4wG"(* [itWWiu z42ޅK)dTkʚ Kp H'htSV|s 4ʮ2'/><_DÞ&ΉJgekg_D!"e&@[.FIN:fhŋ ;dW/Ǣ"'י4a=m%:^r#"^$uLXoC:^Ms1ytb]ZSI9Q0\1Ң/?ݾc_`"rUZcvN+5];f)~YLOoq'9o XMGOg ~=!;oSie+[YHyku_a&g/{>mAO1 )czd;DܻYziTTHۏw#ހ}L$R*bĮ\>-ĮN;vN)cE*=4%8-zW>s}Vd]"麆|w3 Mm_ۇT]C>iIiJwL fHD0 M9̔Sƚz0% ہ!7m;~A&ިVY}8Նe,؜hR--Yf ؑAMՋ)K[+k.}/CzdLj׬M|wkjs& 9#yMl)TZY)Zj;{8(t]kYJrm` aa #cD.\tb5P*w NKǛy ) n )unڔqw1TR9$Uc`oq4щC+N1orbyyH2(3KQ^g6<俿فajԓ6hsbdudڤ)i}y>.?Z43^v|MȃY'&ٓ6ƣ["kFά׆xoF&VM (2.z|`:٫fNNkU4#sRU̳d'*i")mz!ȜHl͵K[3;'@pzzv?6yGw ")c nʀյSƞB-_A], C)p^^{  ĩP趝2Hdv^SƨAOD,I[ %25$(2V:D)ቸϹܮ NL :6 ́RcqНj j|-Ł1DO` ^!тr,hE0݀\h:]A!Ȟ8:#ooܶ>>h_WxRox!)sc R/i YTZE]pp4aʇφ891ZKܿIjkRmaqz2um}?;̖9 - 5l vl!^׸A\&ӫ8*@./ Q`u57"H EDe9*CB 5_Vzd<tĩ(`bw'/6-iSJƴ)q;Hi_^!FMSZoc}/oSŔ-/2NVL9ђ2ZQRkڮ7XYpl*' 6ۏ9}gKyq=)n^P}j2&IR4c&^թ5}<4 Ҝ%5X'yb1.wgV/9Mcmv[KegAbNG+uLK`<(*1<Ɋ>TC4coY7ERкቍO&ۿ{*W:MQP NJڒq{!Z/nC MBŪ=S\mT/M`*iRrJʾ|\[9LM;KT.5,˨l90aй" Cd jĪԹԄ 5 iFF.k'@L)Lhb*`ՙ<+SM{VkY-3!@aMY@P^`n-JhK3MGLHyΓ@WS0ܰn{SX4>X5gK0:t}}kyM,F))* @!JdQ*+AŬ l)B@a%u&$rCRcM2ݻFdg}oQkn>E78k/4'*6G/Üm:Lī4yP‚krSUb^aoݠ/Od9*)^R@2'PLdZfKƂÉԮKYIgUIvk ЀYB]|c=^C!7I/KPhiꏆcDҖJ NEH[6X> x=TEH,Cy7@9 ޜ bSGDJߴuHTPpS钳 b7#%Rths7U}@ċT0 @+-t?"l Vrܦ4r ./2ȞR .~7Wy[i 3į)􌹸wl` #:2ǾC*&\&OwCRLPa*R) dDIJ@Kq3vy6/u{}&'д֜j1ߺ:6 8:x!&+vy nXo$.pjVa؊$]s&Z9V1.ɳ,^}MG`.ujXJS:@R! 
*b3E bB[*<2rY8^ctF51`$/̵YA`u&Yy9Ѕ19nܿ]!ytj2*:>];|(QopfՇ_@~[#>qb6\q`ƉB$b8jLrKM5En;&7ޚf++d0 iȳs w,n@SO8p`Fee8#ʀdZJa< .01Ac@mT%NN_?ɊMG")'nTóoaVzTOu*LOM9JC Bi:Y4z8;#xf58`nB }=%(p`5Y`L~j""Td9a0$O'LVN?;ŋqvG %H7sZ ù$C'8s).8k6}1n5ђN<nl0vA#&;/!7vU?cvuUϽeb+b]=NP ȹӠ1<34NjW/:},sy2'~#R4|e5T[Akjd-۽\ g6o~?OWVdx#wk/}ػMruؠ ݷW^}]6x R'$|=խR{_Om;U`h~I'#&f`湾zPQsYF_ Ésa-S$0 .6iSW3z: 0F8c.2ͳtj?PM%63It߿)z)3^/ JaʲqzYҖR5nx$-UY4^:tFYIc5DoL |';PG;YK.!b4tEjţ狇nQ(ԅp2X5vJj뺺>Bz˘ r=g]h(["UJD)A-JHV(lS-.NŌFg4-I}l ~Jd> 6eQ0 n]gq)a^ոkzҿt%X!,}{q yB\"ՈMiruaĹ)ֻkA*TŜΜl@lK&M,e!uKJ^j)Tqq8CzCBqBz Ec.B݃6H ּ0|/@%Bc8e!Ӂfa(O؋̢y.9"a+UhpTX.5'>__2j+N°ᗠG @= cD FZggYEj$F!PS.L?!]_#ą.%簖*1WKWr (n]t.$@'yT˂8|K"_"|ε{\sfkR:ms5Ek!Ԍⶌ2,aY=e%FԖ*K4)Ä -oHT>o|эЅ([}oC8mk}YП#c'V{z  faK:5.xfX_$Ӂ3qtۏ/?)}S64c#DnDB\bcnj HزD01u0QTԊ *A0ƠwFu]P|0^^4"p|9=:J~vMeuSg*7$XmÝYEm**a? $VmZBЦB q)Ҫiw%B$]Rl"DC5OVH}9c6 ]qnpOhbW9AT+T2% ,bN0)_l-Fq]K Pd()!qh·㌂i[˙ *DI0k inkd98rlƼgWvOc+7MU"NF?ӢD-pg|I|PM(R#ِL,L\h*mF7IqC JHע/8K9I<_gF@AG[jKIakx]I߅aEYV5,t!Vfדּ;{|f .n )[U/|'qNztd}0 GE &kD%wW c M[+czdž9t6N90mT>T:&3#sM3kוlYZd5sH2-٣r6;p(8,s! z|b=:܌[@6 &k!0So7&`F0IeG ؠ"?c{MB>4<5{N1}>]KoԠGGBg˵{iAAdWG׉2Mwǯ>sef!x(~?:\7K Џod0&,?ヵwS3FȤna>Rj7PPџqLIH)=B1#uw^4h 1i$r6޽7RbS,L x/.i.+"'RQhwNIn~Kr0M#xJv`DTkRe4ƆhrD|B*=pcnFxjrBSZ+`iNyr:9;jfp܎kxtדˑܓ i4\#e$64>^,F(ƒ֙qEZfC CS1 {ΌSO#r 8 ogQPSrGI{ExXG_N>;׍0% Sla$͑U`zWI z;)E4N^,}:IEo$.cw[JkF3;vl2i(Z2׶D"ư&`aoiA׷>ԎU@U SʤK5_V%bm[?ᵑ=Txz3aRg&%S)uWS׈1UUܖ#k_L).ySNWRY'`zFǜCeTi qG~%3ʼnʼU@X sN7&d j3c6|!BtϳNHXI꾪0x{ah J#ƳC's}'p:_J X~EC5k];4}&z r D! mV0-Ahk F*S6A#{a1 }m6*UM½Y"4Xc|2TM0z:YyC&o;AIe$Ԁy6)1$_ROAgt✧ӻ$o~W,JKGsNT@0Ip]yGSW}LM!so4E 2x4C'3U G7U*V#KJiQa蔢=v  'BƗSCRiZ6"B\* VR$h)^8я7m`'B 3Dexf8=~JׅJH״ K$Ѵ xo5/i?ԝ>Ӱ [#Y θG .xVO.'ZlÏB)'[,FNp.f!9\6~0f3s1bsL>cMÎs1}ݭH얢طnuXu*vNTN'l'xzZKz"?aaKE;c`'y 1p7.)F/?:wf8k#j] oE_;8O/2lXWE7o^5jD fum_{ q]f 0afr~e>@C/MoG>Shwa/.2uk@}5V^^uU {X`]5c)yITr.j0Vu~SxEO3vtϱg qj4`{vxyH"7eV3W-,Xb<_ ((Û ~4狲] f_uFfV'1$ԀFU#σZ)(WWWW7LkUem)bEJ_'iXz8?*(}Wr6hZU ~|&14~+̟/mι ;-nI7խʪ[M]\Y8(λiS)<2L EL39 @uiAQڮY2'!s=͕i\ɍ JiݴT'xq[Asœ }enqјpi6jѢE&!L=C<ȈER>b*ѕ#Rk<j5C#Ji [m ر©x3|thhj=Ώn!>fSy ž) vO@n׻g{)'Le'$ 柖ԦPkJmV#NSE@:yiR,1oVZun#U[N' cx@Dҵ>ZN"뻝Fk0 #@\zsGAc!mL&+G/j=n #^pLV!tݶ7 `m4@V%>0de@ V(Yj`C4F+YFQo)s˧ GDzU=]D(vL4ed%\P^iJJvs<] ؈CrYl &$O a!_ nN3Ɛߋ9"[P4Dh+ϝ[i}o#нŷ(7ZÍ8phqF-2QČ<еic!s[Jy#`=ލOl4PsL`G`&BVpzMtM>h@RQ~Hʤ\HR+%C/kL;{eːҲJ!C8mIV|[K` `S(cعFHREY 2 v1_hïXI[ˈKݻLwe|>K˹/'4aGN@aPf)+^֚^慯ǡݛq-__bV|උ7 |0 tfbGc;^x36yn6,tg|ۼi&qmR ]3(饥BI||r+IK? qNd_cy*KMƼiNs! 17>.',oAQXjj{`4´YY<2X7U6 8p󋊛8*fP/*F'Q,aKmZmuWloa4?]h .ejE//wߩ<]ÍU#\X0/_,t]6w}+(^^#;HF1~>mNIv8 &CQ6lAfDLiLUOY<_OgIk?? 
?7sxX Atiq2HE֖/> lE%3~@o YMn㬸[=,{:%[ׅ%x)oi.!*`粏TFKOeBixjf]^C8A77WpB [aAі` eye}>dQo.4,Jx4^(=<*/3ix;# 2~K$rl |a D3S8Ͼ;^NZH$<=u/MA2I^^bQ(댓-!kKA$cσoGSg87Nƨ: ' I ;&Ƃk2!`tPk -\@@kɱ<xyC:5E.6d{>Bc} ϳ3u1O$ ُAk3~N0B_fO測}o5s[x+JpH$Qƒf*5hϭPHn &`LdRsCg탹:ugFHhfBޥP$jU~iJ}F<#KB]ZՎ{gaxd9S-t8p T9:Aۘ&SzCXC+IzYsWm">Q0Drj]΋{.F3[`Ή8A)V+*|!|@>kq&!&)"|rrc9vxJiΣ$JkZ ϼ 츎G-qs\1%'fZ{#Y )]8}AQaf{&ZiјWZ S),,7-)FؼV'aBf.W6AӋoz-bgVW9LbObYMt0O"֬u k}"|"E׿.L i=a.Nq*dD4 xS㒈O>ynh4:j-Y=w\~tM^ߤ@hMaFJlYc| ?FO80Ee8.hL<aH9%k`^Jࠬ 毞Q.u0j +vZjmi#0Fyj|@m9A@/Cxfe4A&g0k+Au?ڱ WOGO$Չ(akF]>Zq&d''H$t%-h֞lZpjq8%5+x<V~KwBFG[qn,!&v'R#lܽƎ<Rʞ.W9^Q '(&Ê;&d҇d:""6%p 7wJg3yHщHFc.V|8C 7T03VD!nA6D\4*9y>U L4ԁ2/uQ?>uzt$4$3$VqKFB#"HZi$C*95M{6y.oÏUGq >Ij=cBx-͖]L*$aZceX,M}>̛Sne~=sdrfF18if,8"9`9@ 4PIRAjã\|&SrY Y9!]'ꀙ0Dg@O'K 2$j(ω(0S0ñ S$WRk 278ک69H&d`;Lj =(t),$OH!0"%]XJ'HV)`i^ARzvqngo61 V3 M Wąpcm='b 㐲1gf .9>y:pNT@G=+ ΧMHoIF'rLupX2/pA`ob9io3w _3~&!c@'b@Sxʷ3柕^4HQ4Ǔ%sci\$+^ݽEb 7~On[5q}4~xخJ|}V ۮl `0/ʖ še-=rw9 ׻Wi{^d JA#;УM3΂׭5\jay0pmi&`"nݲ_UUT}Qڋzxz8?-^jqU^{fhZ)_xˢ< =*Ǚ/ 'Nh̞X܎瓦W5IQ۵z/E/Pa2 ~.He:5|4>J?{H]>&M^  ɇE&qm[v,{{defl@(I8{xPƸͤ3_.NQ󏸃xzAyrgUpk 쿱yR3x3!CrI+y8ڭd[ ruwܦnf3jwM﨑O'Nhq6+rk+Za8y_; ͷYI)|oY7?Ӓ\'~]mL"H*EFmse!g[t Ī\M)r6VUZ$Ps.$,@ɴk07I ޚKj5LN訫Q.mAF\)j+e5ydOCv$)TZC4kS"7j`夜B50r¤H|6ސ0} x햯a<x9]k)U]ϋv)dkVXGO,1.'0Q"s犴Z(AE[gHD {zT=<ˏEJAF&KAbk9$\Ta~AnޑS5E&" Lil+*A˪jp IaJ:yy2oχ}[d*#JU $UL+gNj)XlAGx @mATc1 M(0 瀇& ^ E)1@P% Ph]ĖjׂJwQRE,yXK !6i ,Jj@Y(Ek[|EbpIMkUxvj;*ncФ@;[0U@6Pe5kk3(KMh &#L,GH-*LE 1&Dfpjxe0e ^,ȇ4nGrzg6)NÄ"+Ch"j&T p Lc>b5mM!X u Qukk9=L8!5(ԁE xDhL6h%0/'?n^/˷d=ʱCI10HȿY3dٿSᱶT):e*I#::|L;=(U%=$@Hq(2lk E=$V7:t)ε-4וܣ2E 8_~]?x=GxҜ@(Gj8y۟Pmܦl5oEi]XiV{o ^ A>,k}+}XZMYiC#aݛzglty`k']Kik$4 䚲`a3BNYrgA 9Fki})߹m'JRfS _R %E)E#.V+s!@)iD9L_+6׮, >Q_r?FHmed|y?ɇE[ ^Ykw#ꬽ?6UIswX!he΋r~vX-ugTm1>O(_@WΏK|K鶿^ͻ^ oIgȉ퍝C|~6,[& 4ׯ.w+ᮇ*]5& "}hb~˰/inOu$b]4,|a^~ZmWdzjQGUˣQFA˯>eמ~kY̅m˲ڲ9Bs6g7Z}[V*}w.}(.\troWURzR9r1J =vZi0|l7sKEz:kC~Ԃ<\\]18˿\t3,E &mbh#*oߘXRnN<8q:a4c{ʐy|#FfS&A d׃z]Av=Ȯ d׃z]Av=Ȯ d׃z]Av=Ȯ d׃z]Av=Ȯ d׃z]Av=Ȯ d׃z]Av=Ȯzp5i&Xt3׫իmz~%_']:BS~қ g=Yw+S{0J۱/'Ű{dzRކi KSˏe+se]VN(P<=9ȳ뎁|vyZx6=!}ȋ ׵n/MXx7~r;aby;CڎV{^/duy솾vVby~q#nURz }!ޒ>5 S[!{3԰VWOm[_Kק#S@:9}!$~X"50柍N HQ\&ҔCr;[] M;i˟@.ݩ3~{RoUMOKhV˱tњ.ZEkhM5]tњ.ZEkhM5]tњ.ZEkhM5]tњ.ZEkhM5]tњ.ZEkhM5]tњ.ZEkhM5]n>|kjii Ǽ&qXoG 7ĭMi-km !:|ypjҪ9 %< VoNP7!Z1Z5_]5;}L߷}{w}߽}{w}߽}{w}߽}{w}߽}{w}߽}{w}߽w=$YJ+Uf%fK=ZQ)lQhH MGZ3y2K8 ;;sw7bKuq }n]NQo1{!F1F r9$˦|ՂǗ˽_z X^yg/SLώau~td_9ēQQdg,wT€,;zMYv μT,-x[9l% m?B]A=k/"q$"F@qIt>c &Y.Xbg5espY0 Xw~cb:wVM i%jꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦjꫦ~CN6hr| Qޑ6L }xz=kȩ㴸^^3r|vpf%`i h@Os sjoVH?t`*iGU8n=7 Kpdj(߷7)SJ:$18N>-ӝ+)GXKobd\iO\a>kh3Ȣm3'XcNI .(7(, ug^skHPˑ O8IV[ýP 0PJ`[MV)f/rT;Lo~pV,H s93,f*x5 "ZTXPO+^WsFm:3_ mdb29 S&J0'LT,P8I-"`cEld^]F>xu|8Xo 6(`?v{[7{yگW;NBX 6``$5Vg&c}^b $)` J!3 6 fͩcCԃ1[R\t8AH%q mb F6C<'dqgp]RN* ,IyU2!2µ1ժm]@N]G(IRT3dpk,,ZtFxWD %UӉ댧f.owû@p\0 G ;椒8Ljd"m&PJf'''Jp%8:󻊹T(v;U%"J0JڧI{]ɆhB4 Ԏ'"2x`J@@W@}BTu;b^3.G[.}\}U3Z;!D)_$}㻋ugEjs2i]2DaNۜs\vR vc TzU-Bh-wԞ6AqA%1 ~.CSϓxܠ?_ß?543f|?0lƧ17$>w5wZB[?Ok>1g[3|xz<&p4Uz,Vsϱxx9n{=:68=wƞWzhUǃ'm}h;R3A>7{:'9ocic /s]ÖO5ǚ8nx~b"&x"CҌO^z"'E/[;1s+{G:=Ӧ}yy<鏟/*|d6 C7Wxޙ!aL{۲̷-oQag-0!q@CHr)&W7-3L, 8X㽣h8ӛg{Ԡ۟{ͅUN E"ͪIϘu<̎|p_sO#Bl:h۽xx4{3^njWzKW{?%O{97.AggP?9aeh8p\t {/gKNn(dk_x'^{nm%C%١"t}ҮZڔĔ4)MjJ䝧43[l[ڐC WtglWg?} $52)K" F>:iT.?#^(r/M *" $ΩmRe'Zar&I vYND[tDAԂ[źYGo7EV~% V2d1y\p)dR$q؊\Ƈd*ъWATg9dϏ Xfu`i*M4ds&(X|m@r&s U KP96ʒ RF.&rx|6(*)1d6U;Ok/!H < 1䤓 u ,0:*#W:yM x yA\EP GM«c-(>g50U.E炉SuA`PBd"INs\1TCٜYNGG k76ib"NHILIh0D ] [ !jQ TS90ma` ۈU!gS!:oCWlvN dzU Ϳ_/ī9=%l֢K[8y7*6m.Ciuծs+XYzkUAJO]RZ&iWI$&_Ņ|խGJE+ʲ,7%קtK80˗]Jfx`-oծ?lnWhE|33 2)Sq6;twPb45|\ ?&gEYD =?}nL5v& 
SNwJPfyπTxG; LP0gŨ #N ,1Hp1i[IZ?޲5,Qy8xZ!EY sQyfBAӜq>!*Š \Z|[Ƌ;KJߛ{$3QR6"0-<_aX3mA(k5V$Nq!dDH"s WaI8fkez DPgW ];jg\IiR0Mܩ}ݒӯ07*>lc7р'b(*B#q3Y;S4W'.ϛSpr9 ?×ӡol~CÄ0J`hӰA$'NB;{0 h OVz?Ӌaެ8`({8MS`i|Zۺ)Y,Rɹiؓsl֧{%&s3k jK߇[bޝ)/#\W'mQV#CHD%QjSWsޏYYu3P2+pGXR8ll@4VRu$aW˓ es2eUćO+TN@[,L^r*ID'fuĩ1a$9\'0.DQ^r eTyfIDITč,9B5H!ySKB*Y7o*X6׫Ͷ&/2ll0s-!<"k)@ FB+xȘ Wy{9MV"*I*$* h.@5K&f '#*)j%xì`*uuR0^,}("^"?=: #t- ̘hUIߤn AqG|4"= 4xn0sjH B(3}2*YD(Jc2&xIia. }Ѯ\u.muweٔkju[=N ۋ9eNh>~2 :Ɨ27- ufʕ݇ӏˏѫhL40C;Slb{Ў߽\|~#ФȂdc.{v#Na'[GIю/zxшG=LL|pEDWXsX-V$MZ&JRNvP1fZbIL*oE hڣθ63l9=hq&Ɠ[ h kU]R>tJb𠔑XR:)28ƚ*˔k8h4zqVBxkʛ弬N:i" y-,*d?!^Ѹ!בdz#sp`E+}-֔NlhQjF=9io/#"jhmX RXS\|F'YQ^?TR>IL$kTN(~٤BI*ɐJKlDd/Ah<5 ןҍ袧iR48[R@-h$+c$]+c |b6x8[q/E;oH^,Nj/kB0kR#Y~-M$瓋YDCM)h^{f F?[qٜOC#3zLug_/d5nzOU &+eQ;u t2p6R]BХTDeЎofu5wE髃iA?.] ('KzdAX~lPF'_Գw7kmke5p¯m:riv#ɧoNe+;[R(}ՃîJs`v6֦%M`v9sxAi2IP;uv+Aާ+m{Ђn؀nZYJh +nc] َf!.X-DVhX)fU մbf 4Շa 7{3xH3٩T0D9?8郒-A:Dzhc#dXO۔zkhd_!͚*D~A*ׅ޹A#,5 قC*^SQ>A+n A29*z ]TEilPؠ{|AjA ZpęA[f(mxL 4#)4h'H 6vF6hd)-xiMN%nLPTTTD}G,j$k!:FQQ%UTH:k|1;${ R礁aOxI*T\8>b^d@f>J'L3#lpӥu2?W?gSIf|L6))[20KNNƬDd)gTڇ{g0=[2!AJIk 7><\'Uv6zz* ,C+5GSSVbКŌY Fk *Y)<' K ;7aUZjx8dz:=Yo HA&Aj%& oƒJTGlSbKZXo|ֻ~ Fjލ&LHtKyd5m7ާ w7މ/"56Z3ˀݩѣ悱ֽۋ{N^4>[}x dc%_]4PU6VhI6 { tHIGpƂ%pm.Meݲx]~=n/ XvyY^3!d>~u `EE)\o1jw]!WDd?Lk[B/o%cZs \Ӻ l?.dQkԮ"[,j6Gx/QTXT#oϺQ6DTVmJaZngqUy/g͞k@q6;/ ++AAc&k 犊Aג\om\fPTMA#jS"o[W`Uo1aƕgpAeo ÝQNS>|؎m?폗CtsG0cP~8(sJ1sm#Y_!^/IEyX,JLJz}$ʤDC$/2p*o]U_U)Ls{JԮjcfKT-4}~Ľ[Deld@HЮ䙤H&,=Gz{HʸYћdrV <+泂,% E򟌉:!R -E3;_# '%TQFl[ݬj)̞H1FWk=#2O,:q=̌ޤ3;3hfӱbmeOQDT$3t>Kp"T;ufPK5NKC չUqjB`G# o{ |X\+IrTxjB2Hr1Zo j{`t9<51;}.llj0ٞljfx*LmM qz{?`=ΘR & D+*$L5HU5|5\gypί/[U;1jKD.xEE:fBv d<513zX69β|wIrqI!n&7M#wOTkb~Pf-hz,FVhzeugՃ#5+;XrTƻZ .mYLRM0*lS c#J(_HAtGFr:W;o4m{S.|AʇovKs7N5$UG}H"ŌJdckHU_9 .6c![tdteQ%ȹ|P"\[%l/[Y}\;q4 ȞSUw˗,;RmXǁ20L$&X QR8i:R782ZRɒ:+t*I"`P){g m,r2d1, ]f '{ ~ y9y'/=͗=PL-IsM Q"gl㠔8tVd&L'}$ZA0$g! db  Bۍq:dyjTV3=i:pɋd!,ٛHV:gv"lڝB{P5fӔ*(5$(Ygs!f9@ȑlެѣm#Uv2 A!y%!j jms.ϦR҆.mѩ !X EAԶU\;~sqEξ 0i:CK`pZ_|g.O$E/@ B":gc. |$L[7(9):1vx kNT*hTHVz- Ӱ eɢ&=-71Ӧf=Q6Xj:>Z;Y\࿋Q1ا I?!ȃ0O=o^5TKӷ5KyK}6`DAi+v(@I3r #k- |5P4ȶQK$eE,mlN$ kiAk{@V8cQȨmQkAXfukSt2$MIj VSo_/`E;LSJ0uŝR9ieObq-X < u=ς6&9C8@|[xW;X_A#)X@ˡv>-ȣZET|IXAK6[i+)sT@10#K9"94Ȟ@UZsy|_~E/e;׆ADD@-,cq!d_)VG4fw  ۄ1Z]HWLحŢk=L!BME78Q u5nbXrҀfT8gO{ aphW'WE)Ib8g`X*XEi% @xύ5>Iw̮k R*%fb|:(K X ,9IMVS6(HV _4JVQUAm ʱHgCzqZ&x>3?}X76gR~ɗg2~MMb`#;:cAl@tr2䅙/LNy84srG׮\}R% ؔZL( Q\mwW,(Y^B1o]AL B"EC&./ǚ08 }AiXf O6mv<Z J+BT2T[.z.I-u9m6Ewz^6;!~-m#ԉh_?YiIUzffû}6焥>k9;o=g$w܏4^4NuxLgc)NtGqtYƗl!7֓xqu^*ɭyC$PXezV3}q{:ojb:}}ĩp;ڪ : ƫ.gE=fj a{G0T|qE||{9y?xX6Pi(JUGM7 _GϮ}o_!+.>C?.u #?Og Ga4Żmr]Ko\n+Y% EY%ْ<׿>,XZӶ0,wRM~$47y"M}`|I/#WN%&Y>>1%@'^XaϵaƦ\d>{)F\jE)U=)W8icZۥ|gy6m\F ~()ᳲ {e|&0F4 1 OY~~}y{QG9Hn} BDKn.y HrRކ*лEηP+J&!P}Gx}g@)*!@{OUCX0{&׉|@̈́]ܤ!XL& 3ܢP^ahD[-ڦzW|Ce̥ 7: kƈj.^v 0ꅁ??U4 K?.2r8]RaO5C﹮y1x\dfG5; \,UJ1EW/q#au]["WL6[NW0}]4QO?i@ymKeS_I_kS֔*\Gվٷ0;zR  hȉV1yIfe½={((jZ*^I ػ0vckkRI#ڡ-~K\{>NbgԟG nܑNTu-A>RNzjJ =;X} upPM7JK ш&![AcM@wCذG@ 3è*TAM]h"ҎӁh{w-@]tMː&GʹDeA}eG V 5RJ-9WboHJmUeqxFvJEoot܏n Tv]ox Ssиr'HS-Qco2[BZvK6h6z^̑`؀RvhS壭N{42^mcru7!FD C^aV-pb]H%2[ 4(ܴm&֊of9.rweVm1wө6u(zob?CͲ\Jggm}4$ޢJg# !`wY2D.sc^ģA*z9c_b3(ay$\\=;胃xcGsrD4\/#_e ^B彄Xqk9)Vu1р:Iؐ4|c)M[O]^߼"wd#g*{ 8=O58}.'JTdwQwVOe PF%˝B+P(b9*%~BW#Rj޻qqϰuOr[ҢDt6&(39pe4<6mgp07wdGI a bX8v3W+ .mJW]A! 
ɫM˸{Th7ͬ1m}6ƈ"s{=D.nbB%ZjDa6v~@CW=y+eDc@s'[t9Au.34378?9l1~oDz1T V"M:wJ0x4Em86I.t6R(XB >Bh]lTq"qqQ@ l -"T KUrU:ɏlMXa(w[ %kK)=mIQTvۨ!C&%?!ƺ!!raK+e>ongHQ}y%dSwr v%1qR ЉfY0cShzlmaiTsþAWLònpeVkskCy#IUDr0踺y_%U]1{H]-u,f U!]Ƹ1xyңEe}`YrXY ; BCt9R r5)&c*Y"29B*\penÏ*o\ؼzXQ7mN4f/|$x0G$ඌpY[vփqСLsCs$&D,cؐOzvAr=A!חgd]?Qp8*G"|q#Ю|v !װ_2n H bD\óxuIj#R#xoU['|4O!C*v\Wwn{KזkX(cj}pxvEJϽ#7GۣQe1lv/A~ap}0t*PoyQL1=rFհxnz $I`)n0qQ1t9=?-r K̮[!df5f;>A4$,"άx!mɸG@oC.3?iF|_xkr~zGo^^|; ߮_^\1U{'r_9_&S~NQ?޿|q9'K}}{{6%KH9i$_E6P$ -_F~̫ʽ BFt`vX+aH swI9([Cʔ ӛ/Oj$0I jvfk>U薷cq0"\goU8N{>{ #7S Ss)ev"T1ZCx.ΤL9PJGJ[#d Ѷ|d7:0'GLE^IF $k0ȖTls'H"zwD^֌XCK xŭF\Vq-b;lN b5x.]:f}Dw-[r8_z5L GoiQRK"$+2L7W.$A7u;} >cA~79pNDp6xũዳ c 7MDO&^_$Bc 4DOY4:wYz1-s58FB7\(m";J;m-N=P-<7RMtZm~+D+8*b-`1H =*g8NFeXG2 0 5|4|M\\J)tiϵVT=u | W5&I\]GA _C,XM- U_2|'_ECn#~-_$u6\ @6ӹ+L$5חe7KELT EGe`'Q^W\Ket^D)iy/I " ڤ&0š._+ΚaaWK^nw6+Y惈n WhzO=GVћI<ĩG;UG-z4$\ݸT)QRP&4h# $׉=h6g(ӷVyj~[jjO7j%/>{lug{b,U,IAٺAXd؎DY[r3=V=/??OYod^̢å/bTGtԛ]CPc<[Hj:U ygM>sa.].BۨBˤ tSu$Pr ~}CU1.׈8;L@sk0tƮ$1g>łY1-iPXV{6 Qpf4^J[᫆2E!‡~I [ݝj%_ >ۓd Y!UoBrή&rgw r=ȐA\zru~vq ; D8[,sD[YvZWdw-"~tG[z έm\OA2!x+-~ḄPl~zȼp5,<o}¬Ĝ=jKo俷]o3Iyx%eRw!{gpr,s6yo2Xo9' ?>8F); 14tçBpO짷3 ahlkm3HhuNSCd@ Ame A.Sɪ;TbBPfmݞ~y>z >+Gۃl4mRZ+;Z>UREhU-[>SF4t~X^$\տպoBxz2ii X5 |O|?*qN Ź6y;qr}po6HF|}P3Jt{w>ɛ{sM`!߲#tSSJնtZZ\;\$F*:H):lsijD^z.b%rc$ KCNCX uWd-} no 0_BLhC z!>ɽ7%Jٹ͛ܬlqi5zҹ Qs etURᦷnEIpG;>Zr\2=xοO] iQ?ènr*N[[XeOyl7EB+ԝ)ۗ|˷E9sT!<[jI)>33ar2V7c|6Yfwtbߛv.ZZ1-ZP By0Be%I܆PWF\,|u iCd|ź2]Ŭ lΚndaJ{DS}1ɕE8ro|l\殸\J$V2}mrC"";F=+|+ʳVLWv2z!%려عȧSƝf&ۼ]@"ӻnuu 9]*A|ǭEzզmh:ȮUfNPv|=/XcD3b$53.VJ[K&vH(:% &Ы9i:Uqu4_DZk)@ee*U1Y̩lww_{KChVLբUܜI2^:|p㪹7NUkw&hEEMTiҿqrqu4T*\4|4ħ9ٮ'U\ T3[;rzO>o絫!!\s(ug73sY v>꿎VcGbhVߖsȺ{?cv9ow2ob}[_4ٯ89fG|^DsP[xz[-*ޝ6fPݐ(_I`^OyVDr=ѻ|&>Rg{ m]2_ڃܯ&IzK~&Q'ަN}rm>}{}txȠO6 ޓw-,TYu=^KٻT.r j# wP% lmt*gQ2mOα? +YU/OO|û[UW; A+Qo9K*d}\Ƽ`h>~2enkIAsc?R@It1g&\t7,5  |6gQYwvO2tR/4_'H=*0tƮd5;?_,`o?9x. aU j شǤ&[H"yl]YBۨBtAND(Tn@n~y^] jq` ۥDM<'%eQ],rϢ1PS|WՊ(wXK(vsces_|I8z AVaR*rdgW?9s]45mq\ZG*YTv3Ԋ'ِSIy0ogoP /:,DYbBc7'Ec4qJej \?mFϽ\?_BoD/LDη7I(Ea$oObDzxެ4{9|T6ۍ!̾f" Ȳoeohqv_aFǛsTmÎR] Ñl1"徘 f#2h_=[_/)=yU _=ݦ?JQr -?'$0+mn5 ]$% e6gPpu$is#媥< =@O?}֭^S$ #5xF\$؃ DIN8[ӡ$Vx߳FwF5`Ldcp},ŲLԞ-n2t#)%^3SQtyoNT2R8hvί1Ah-AX"p+clq7IY昣W*#+~LH(_Ζ\;T]G)!}jQږ.QڃxӀt Ta͊qx(rɥfc*J^CڶC= }ԃNq̉XO#Sy݆9g?4Z$mfpå\)&SM(oGhPn*eH\C^)L"o{QY؜5}˯JwI\yDqz牨xC|9&ڈrlnRl\tx5HT5bzOe ѹRfN !ZTig"0ۊ*`nт~ l QOb|FO"qjmaDn!cvt]ō N[ޤ0k T=u Z*3i`I":7у) \[٩4 D%Ga|T+8ށ#Z)@e*ʍ1(l'e5ZƨEʛdT^}vxg@&g~ьT%7D"WJ^nw_Z>lÖ}4Mqk܌Wo@ruǨ'xU69U]gS #wwCCAYnȝ4_XuUUsR[d4&bmOX79Cwtuv[K➻J`Bp"IQ}p<  {_+rI&~p2t>x.p3ˢlnt |Ԁh؞$Kbz(!iu%h8Ċ2f%.Sf4d DK\4 |U,Kģe99Tx;Q$Vڇ\6Y?T kcCݦ0PD;Χ5O…{Au1PSW"!wXK(Dž&/W諦 IK*MDULPkЙ9(E"7EA0O[_/Gd=+m0˻+pZlיdxۤ"d+U+mxOoTBa &L)vf:U'o=U{LXy,$.õg,'r`o";a/Rsڠ0t-J B%)#6]KcݢXz;zx#;b c+|{%hV؎J?kr\6ukoӰUcIg)\_J_=Om_ʅz,'gt=)uף2 nۿiuv=,(͍'"WS.\ɭ qx磶?^=~o_/ks2q[MKSB?Ӌ}g׋aYy~ޟ5֪wy ܷv ڨA5h1h3~g8}\_~'X]jR=p8]z@ˤfO . DB C [b$I<.M5_J2_(ڧ،Ck@h"V)T,rZrA.2_MxAAL8>u9H&|>Y$ Ok?:Gc]&`э sx=:M8"}A.rWQ1Nr1Eyn=-@Ƙ}4.ˮ8W؊O!zIz}i}.909C۩'}>}ӱMklƎsarjH TH:6Sm9#G#"myCI[hIQkkEk3ԷyIOEEd2Vxp3jAУ/1O+fM{+=NhuR8#d1s^SvV.[t_OZx@+9džmeB/جfGsAm2$H|ҕ;?FV?FJϫ sW# "o0$fsץWqLw$SॲV8HrbZMH!Ekإ]9vJ[iRr%[/v((LfyN gB։m ֢H ԃěZ tRwIhǫ ps\KYi ,vd4aEW^6IphT}&(uiHv`;`{ (YRXp.I)ItW F)t!k@)HNjT!aek T~hs= }f'Dj`%o'`h}KSg!%!xRb =]8BSr^ El}ZuL)etGYT7MaS~y*)M+pӗ=lPg?b q\{iϾ=R.x^~̿)_UnR~?zp,o~/goqCKgr)^(B|9;;fGN)y~mXu`";P`&@yxFsn.ٽWy{\˄Y%%~E8y}\fw1娕޾Ձu^&}:ȩc2 XYr1I{Fwk~r917hxCkÒMZZEvN 77qIٜs9^7c-[1.vq%t$;s.!D[Q]N 禡GlC;;*bZIV΄Z9PES 袳s}?t= ޑk&I8fk>@%@͉dKي[ymۏָ́]fGl:ycVa۽poinCk蟠._. l!%B*Go.7k ۆ)iWkuJ=fd )sXjF\5@J 5(T. 
b cgnCᝋ2;t ,fC=(;knFyS;'+Plm!J`mCm$,Fw]85gw:l$?6G &|! wU1}*ٝ<8X9d5.9"o72=#Ph3PW`u[OU.]qcvElnd3u,uLa.V**UOPǫbTɫ.~ q#&jF@@RBiv^$V: h G hoϜA(ƊEEOtf#oֽ0○/]imk/1j4?hiǾj_b^ZGJZ7cgS>l/c>986};X.| GW['~࣋;5?{4 틏@|j0J]3QA@/2grz?wت Og!Z޻tSrxQ_O߼˪'AH/^v´%mź#nIClS~X']{m0%OmITe 6DOyNE6{YۘhՀ:ݸ>݈wQ[iצ+_J~aa/]t~Of>1_5u3nAڳ; 'MHm "/d:!4 ]ܾqxuy^jjHJ{5](/3Ʃ4rf8s4 Jok\\=:(f\t!["q:_2v%:8:t|I!>mj]דвo.`eB/U7X $=ip5dZPCFdn'좂RjQwi 6L]#nӠ_&2 14*$tb2V&5h:/㎰]vְd{'~SWd~6`}>8h }禷no=d;oc G T<.{,K;ʼk׾e! L Di ]T~v\8W8a3Ax֜ħW;|ͦX[dNux|^o1dE$``llW)hERi׏fſ-OXc+Lv(dJ՚BގFsCT;F*='쁃cV7x-#PD%;(NBkXߎKpl]sـ fBՀTf)Lsi;wBOkrRXp.I)!.*1bW!#R5brKzܾmov7M}xԅ7PjMov;y3 _n­&M^ٷ7wϾa@wnI%a@{}ij߳om?ͿG=d j@vCؖR'$^4+ћiy"VO#Z6U'>`g-ջys\ DŽՏq&q.xzz@>[ |F݀3zjv <Lsx#b߶I/1qٟXQlΏPN؝{ Z5.OvQĻWASw28غVCOT9s2HMbG?f{cFQW>`EݚDŽigt:v>p*I*.]ԜXxO)~NȆֻȆMɳ"r"pERc5X2pkIxY6pI7}hGj\x|xAYlh+8L]f T$yʇKtU]<䶇9<¯oXbծ楑] *^1$kMH.z%PiJ8TMUhr7[9ߕY\f*F{'B$^$+ʌ@˒Nf7QQh!9#@pZHdUNUvB:(t19W\lj%}];Hc{|4֕B\MwRuNzOƍ] ixsI :K&ƽ](ƊEնuM.ՁtODRPIf][O\I+a-bV۽h[jiFZІr6u:EUojZ( \qߗ!e+\uC%Ol ?%M=B)BI 7jF-ZWZ^o tӾ$;cоosI;o`-kZ:-}Ir++6o 8GhNx+-rT7mMCDfS|KTO=b>Ӏ`)$U?mz#{̛;ɔ۔T+~@Z:s@5Wl2f#C1o@CiTy"3CD*$so< ;6@)d6R>}HyEԵ*Ψ~khi|g8C?Tn%(p4P",<{OY>Ɍ*Iw~[-wUkQJ! $ZvxdΙ"߯Lgl8 "=nQ'٧v[cNfn)ʼ/Ip5 8%ݦّr6iqdè`Q+)lҭ]Aa9]>ʋ(e EA2ޔ>XUbF&bw5le[XNXkaMt׆Υ6)JS zVZΰT_J(sWCEE)uV L9hdI>hQ7JK@R]u,в>WSyN}ެӌ0nDٓ<Vr>u|>8n-smG֙L/ph~qfqMib9qgb|vv @̷57Jk[WGq$d0A:-f$Ͼ 4_~& |zO|V$; 싼K8g0!$լo{.9Ʉ579>-hwS\0;{L!9Ř 7z :/5MW%40h72TA8SOFPJǹwkg61nClKX|ɞe&=6VGSX,> orMaMiAaȗw`4pV(C((wc.~$^ņ*:6l 8]UOO|^*܄I 1|~fԽ6m7ܞn3Ku43@!}e)\qhUrSS1޻PZd4:UDa@ZgŦb &uKZ>AՒu~vtu[LV湤VN!EУ,&"Mȭn) i$RնIx19%ys}DI+6ggQIΩ>&R^deMHV{"X!55xgONܤkE6WvކnGQ)zrk#GV VR*۶܆Wv9r̐lXf%Ea 7)[ښ0հX,YC,6ҁ%˴atMr81(O=L۶u4>}r}ܚ-g{dBe*Zqixjܚ[NGާ KmoIy2a" pY裐+~@iJvMrvar[ m'W&7)!Ҩ>"622) iHvH#)0*8b 8efi`S_*ifZؖMph"-pyj>/O+mi,)Ώ|ljvM18Mkjnj$u7{-"{O:M6KZ~bSb-+LEIqB0h ^4NZ|cbT΅JA<'m5-6~y7l^X*88WR""Ei$OnkB3RD5 {CY7IXԤ L gW{`֣טL\noZ`Ź \/Wm2c )8}'Pݫi[LWò6,85*$/ $%tRtGMJj3Η!.%E)ʃʪ&=[K7ie.b w208R}8( *~OѹIZpFSf%fZtԥxWaj:)VP֬ n5( $tTɨTY^u@c5+J4iK6+^A<2.)\u`yF"֘Qw =v:rc\9b%qYp)ДrBV0e4iߟK7Z:?6J|kRz80~]bS軼?*/> Vs`Cc F Vg&dzŽT;o'}dRݲOZVNM)ǧ˘4!-3X[f>U.cxǪ/P?_ٙrI`obFfuq:PX4w{]tJv`:&=7czv'ly"XuF( 8FgLyd!d r+lQAfZQ&u>-aruΞgD,7{L9t]e1'5sD>4ǿuz` !Rx'ITtEl6CKPgr.; *1iSr}̚ R\jfA8[΂~ِݟ#k)}' 6e`Ej:M R%}J#P}'z=8Eds*CPJ t?kT D*4K"zFQ)vJ-)l \JbFҬt2ي +r,ut2Cx\'^&CNO=cslt[E@92c]B\焣{ɰ%RyC:[$EQ.0H1"8w/w h/^X'uݯ'4d;NQNX*TFgdda;j-ݗh;5dPMt6h62Ud {˵xt/)>)AtX07g+KGێ&/Z6d4 .VY1sj4; &Ĭ˦pj8:4ijөƚ"]jY d3{kfIiGȪ U v%)Z3J KNΑs<5lSG[P Z5xxmDZs6C"Ǭ1dlB0TfHVqc\] 9Sm(T)ijY`ŨkTr6[,@&V\ZgP#ٳt?5yo?|;onGT^wZ+޴7}o̖޵Ǐ|7~TƧot6x_kU>܊E7U4H_ E*81V_͏~j-ሏ`Q oai*`h@@ 5%Qk[QN( Ͽ4׷# rގ#<ͣ"?,_bԍ~Ŭ}x}wphJ:1SI(q΀7rmWa7e)GyƳhSJܚ3gzhP[D c+Գ;!^ !~j[x"=;޽;朰CiRcu?L"ʑ4\| pɰ[OTzHn3I1 yQcx@Hapelt 3ݺ|mQ@9}1%Bܼf#x5zK9BgxKـasA}}7G~@yw: fwO1۱~-SF͏}Am:(msx%ȳY2 hw ˫xֿxj}Ga1WFû{ w‡fePSLn79)UpsϕR y+>&TWA>ѲOW~ @3Y/ufzHVyDJeD)a6hDPBl̅Lp[h (7&XZ>TazvgKe@!u6"%Lm4svl?`MVW `G"">q)qOn~JkaHFW FDې| _nMc1Mx\9b[&{oDel4/!5Yt~nn6bumT JQW۫ߚF6FXkdXc'YyWueᵱZ"j:XiAv{pfPDg%E HZJ*&Yuܔ$:]rSc+U>}]EHHQ=d)j(gI/CWlKneYQ(;ޏAH:J_)ӯ6'7YC(9'חgb|vJN:"[sh]iⴒ/w, T2dv1P1ڽYW>%|8dJls%yHma b$>$0xƞ) B Ń;.Wx cs= l/fuoF)/b|L".aL뮅tgL`-{H,H ~l'= X/CJ(@~bI{y'bN,Cm.pW2wdYOxLg~yny:BxIÐdg0SXAӌ=WTJxm8F 2v edkc CvљhLjXe<"( uA*lTdR9w=C)TK9SߐͻI|lCƭ )T,kq\SIc7S@jxYqᇴ,O5ym6n"E0J AئJ+qÄfӡrik ,YeS%WKIdJ:}V"Kr.*yq1oø|hjR`k$ ThྙV bbao ܰo)A.Eh՛S!VwqR(-lS\5{됶m P<3WCT!$UzyrN,/?xm~wt{CO߶qɓK_h10ʣpw%#s(dՑY_;]~^X7Ai97_ߛDd*uF> Q}IX2 Q:͔V&B7]Zzi(eHT"tެeGNu~Q.Ugzmvxnogtm]VY68WjHKhVY]s: 碊ߎ-o~xoy8(|@H,D346?y괭.iRE*r`,wV|M0A+SHhّOCZAXjW~#zç>hN{' m7ss|vxj.mDQzx$[]:`3nM`:$: 1 x{:`)ƒs̞cvg 
fSDijxu4cs9,{^a᳄Yu"jq-6G`KDZJ72O}|)G~Kݫ0:Y"8w~`v%IA9w82%b,T0;KV RpL,Y6,Aѝidc#ݽ7w8?}%Aϙ ѧ fvGtpwS %XZl_5Chd)֙9y~̝CjjrI>TGluzKUyC/vމL QO񱢯W0szYfyBxʱȆ%9r>EKNegL {mbB6"Z#|Ў~S!72cdDILQ]lr"Tgʖ:n^ ʃj:%T$'O%[giHь|A,ha7(_ݸEJ6 q @s\22]&; 6_};WŤGgvrEMKb-ZȇAx2;&c WuC3]{,Eil ;OCl`;I]9ֱb(rb u/&tܦv}sWpͱh ށэ)4,,Vmo!9˜8|d۫VI2GzڭUd.2P?mulN"m])פۍd㪜yŨ[yWj(Oe@kMt VX\ BH4r#GTIrHi{b r[B2r +#j4HvVErR+=4KB` fʋ4}Jެh*j3bC=)5QWpAΞ?s)+;vd$lkSV =Z4 Nltl9Œ5L~dg>ut/!mBPJ09} *(Үރ~ {atڨY EC.Ӯ(QmdZs#·$:IإӘduj4.>ť]'tGt&C!]i}r>,D>K Qىg _Ag=?}5G~WIpzGz w=|B^,O%>xQiόx`˹qCMѰJDnIɃ%mN5X;HJe[bOqѮ!$ZXxr%OŭoM-|k`IoJթhT {hrL5DMF->xvm[-(S,` r8{.˴mvhӽ#XM*㯢JN=&j_ 2Kvah4VЎT*&~_To'O$,z5|}${~X ZW `?K,q;w)*Nn% wH.R]2U_`vUGe, !*7rwҵ^9X ' cNї'!5jPgiև+7h9;N c)#=AZlcÏRW*nn+Y_:U,eЪڣbtyV'rksU%^E3v00W~vzpw9:%{sZo$"G L@^/!1Y9d/ofWbDGs(:-b$Cu^FM ÍωsńV&7NكI+.Jח|mʉ6贇w,vKi r"KڶsH lx]C,1,;[e~ﵕrUeGˁ~%6w:ar`'`s-X[+(\ta1 1t]%Â٩-}֞\s0Fm@m4kB.ɞp/9vL$;Fʙnj0 Wi8(DOWE;j*HͧΓ/_1a $5Z|By̤ AE(quMUa2=.@t`Κtb:*6<"]WAU+Gll4 ,F[zj :};+Vd.2_=nulXZ kIsqX}afmS Y2pB=]PHyUFG.ː0WGm;t1uz(þADSv`Nqr|g?*{.( GbB$EF:%e_1||(o'$78ÌAmMs{Mykhsd&ly?ω 2U$]`2U>ZhP@$1s?E.^#Ռ3\12-L":'vX5rBYwXeV$J4g$e 4z-Jض*UpVl`-uM"]sY>'b3V_ }۠jTE:\dK6Wew.v j$ZXxr%˔YРqܞelLkI-5?ƴ7Z}ꝼ2[ )؊a^[km+;6f*H> 15gV[Ziu9{͑ v!T*&"N^[8_\A[ {hrL5d́ gSu0&8;*r|Rv%?v8Nl9[$`:WhMl,$rz°.w;GcJqhi6YH;6vmگh<8m9Lm g*̱I.~E s(ʅgtp=A JMgӕ} D5V1'.h`5.4kh[kl;VJ (SKׂ|QZzmGgym+?;\, $ {[O|x~͝nEo^ųD@{L{S~5ʎ1ҏ[7%yw2.=HSI0ԔpWQIa? l2 UFi5D XҴ|:%OuPT=MY/$7H=#Ÿ4wߞ+t~sJ}y )&\ė`i!6{yLɍT$r)n*z]TΉnzZ=C;OހৌumC}BH Ċʘ@#VϙR&r\[A/? (u߿2Nּyx##¯CEGxq~Jn5C|ocC,.`))dtk3WA7 3XEAH=6#  :c>d*S=E$Bp% F@MO])Em7*CGs mPoMhTiX+vqiww.onN8 4&UT I.J\{"}z\ 9}ӂ"K=*93Dʪ`5>}.'3}*#˨-!Ѹ#j Q"xӪٟ ˜%_ܓdZ/tY_}nh7d t2IVb#櫇k/(2y8Ϲ6M:VlNWkŒ5*\u6ԊDv\>`VӘƏ.gԜ-oE5qn4,T x.CmBɾ'Z>-"\|R $p=/U8󺬄q&cPIW9Yn>Uk7Z'S>I,`(@%@͉'L2$A1aÿw/P9~;3tU"Hj dL &I)`_9*iHO7n 0ZIPwD6BY:9>xHT]/K^C[y$VRαdvWegμ< r` mZ&7L%ᩌNG4]JRVF aaH@CFoɊ/9;}(]fH(YrB͇tə:rHUqqG4gCv: J:N9lnVi0!r8+ 6[Fԩd,ϼ.C#?;ZlmQ(։ޏ:p|s5$o €zcErrw~N?0~=<~߯?68N6L.lNż0Ŭ( _Ǐd@NϽS@GiX!pNa6#}h#XC)Q;?^jzs'OeN(|]pDށ~Z\<9^>R>wFc{.c{ (X2hC$!cΖ!E2B>yuĹSC" yH"yQl/}G fӔ>b2> vPp4 N7y۝>G ]8$QP q>9IPu@05 'ւk$SQHC^o|!n>Շ`%'95T s`3eDRܽ@5wܲQFYAhb*bhV(vͬ35NC`_!xN >Ҷe5o%"75ԬEe%m<$ݝix#Fk!ūYW^J.wY݂]$9Es[A؈d@W)R*`ÙY+V`M[9tCXOhlD7R4$a|a7#\qvcKZZnҲJ!K^5&: kYZQ: ){dֱ!y XOak@"#V* |'BS&pfaH{^` cE` Jhbv4m+f1]fDhCCU6+Ġ+d&n6u6qwlS/$Y2dԪrQVbf5Zڵ۬D(|P *hc׻wDT~ԳQY!*!D.;PDTV|v ֞}xVwr}::$3/ZX}C2h!nNA#@ @HDΟB.31޻;#Zċ6ݙ+m`!TeCvڗSnzw ~98-ޒV JB݄;E.onβP vP\m,!؊ buPn+S՛VfE䆀D 0y)S%S Ysq:\O dx9)Yܣl)g}L]e=bҾ_(I@d2HtΆ9ElthTf D#,KK&)J/$g.dWITbP|-9šwm6oCL#>r\:E.u$gg]`yHE>A"u^tyw,y熂Y%']^ -3NdsKKZDTKTdTU v@y4S}_nnR[_]:_ ]es%~ir~_WyƟ)4z}_CZB낸 DG6ϟ=q#^tH9L}"v ,h'#AdhvTP&M LP=F\n,)0'RtՉ8r{%V趡 a[H@NMq ޫ5\P]Pcjh ٹ <g||mG]J) xNO`*>]b9ȅ'9r.i2OEx7Iސ@ڜ~d+ԌO˄R]xeH:>.a ky>vB.ku~!5OuēJ1S+ğn\z2xb]V$DjS$fҰTO8.+vstʺT;W6Ni' efvcwBJ?)ajz)d w=`{l]Hf礈*=%.z1%p0|4_"_Q"'fQ1@. SuvL䦮OGNLʘ}, @"okj LyI;$! 
+i]Y8I.-+9y'LޓF:ſ$g `ޚ]?1i0wrA>b49޻ Zu/nd_9\mDYCgfˆ֋{Vi0?ԋ!a uNQ l9Gpᛄb9 ih9I[~}D&ջu&i!I(`lDg A`2Gi:Mw-bi]^] 3(\/߸5(8{_IZѴ /LXGT5{ pCMbo"އrKQ T=`Ha)BJ%JZȡY]Of Y$UZ112 ELYTǴsgϻ$/-b+g1Xi4෭V!%F(I6}W}*]ق %5f)l4ah[WVf L vJƫ!}*9([,SʣIp`UG UsKpSd+AIa& H&8 !H apnN=l&dU36 dXɘ+JEIs2dSB%ίu>NbrQYCB91<;鶯",NA&0Զ/RsSΏljqXK\F@uv6`ZDz$rBlVsbn T)T>^&}nDe=DT^=;ggnP<>yAB@ 1 I@x@]l/KۼOϐֻ;n<f>m)d}[;.!Q163"V8Rӕڦyë?^g Me5$,KZ9$xdu}y~A=%wm&Ƌwc»9};7 =O/ĶB%!-PKWX6g}8x{4zt(iB?9.U7an6Td=P @c:@/Ah'#AUIF$hd;"vxʟ瘯/y~;y6R[7Ͽ Wg|^ֿOAyun/mޟ_ꫩy_|qrc6Ϫ}EW@A#yˡx~,~2'WGըj0hmL>YϾ|d=;ss,YMέ.['fl mtف] ͭ~{vx;̊6%yjLޟ2#v`OK`޳uLȍ,N|A" m>պ3:9.["#:w-qvE|)J ѧ;?Q$Փ˪jw`H𜗩3؃ڿ"kTcNڽ@=ˢ2wTz#sibm)6/UYUBѣq(8!9pg s]bOzT|=]6Uc4ޙ>Dfл#,m#6=b(8Ş,s³]a^]le%+ete=> @C|7 )1<;M@3<\SUpxV)Vs/3ͺG). M]"G 'y#yIܼ?M3+XO W{(`\opϬVrM*f="Q0޻펖J-KJ# =[D܂#d=!2߀ZG?izsP>m2lQ!ܮ<̜o7|+ A*r(P-`\0P#z$$[6fPQ*@4Vh TZ+ՎyydmDscӱzpWgȖ)(4u3oAхYHIn2%)@RN$X?;_=\sw`I~=AjTx3=v>zZ Y_";aM~/16JM)f5nzG6Y1S@ zsvrw YdC0s#g'/ IjbF6IC,y.|Cv_I+}ĄDfߜ/Z | ;Umk&y+ wSyxDs<$phCP2CԹQA(r'\1]lےujkm:,'R"J\q%b{i,N+cmCU [ߠF*ǜXRqVMf/[t^;ctZ`4rjF\!MsP5N]C*$EP!d^+s/\\犐G[nΚXШބc>@F+Dǩ}h i=]rf=7u"a}qr.OB@|h!;uzP'R'Q\TqqXHA6\&3H=U) `PBSXL)JeOMc/+9mA~v&%b,:FE런gdT~wFecT!F88ߝ?e(pxs#f9͉)~vbD*܂`YQ\{W,ۭ4:_}rd!~KÐuӂ;-g@aEAq{VoWd;v)᎘Ȟy\t{$i ̤qibz]Ʃrj*OgH &a ,.cPJBߒ~\xnPN{t7tZЮ*V=+Y,w?rP<\GlDCG&%v0LV+EZkk*Iܘh6vv,XfrpRBPqp{n$ y!ONMI! >5֨HK˳Onu&\񼞋z dr!J v)%,:Cq"5;Էڀ^{qnX@9.#WL 4"s] _Pزyhyފ,B|R-ec@ĺU=v)K:vUu5:HFji|,,E`*g4.9J-5-P|{VAjD%rlTs!{bT,9J1,9Xkp;8,=7c:!6$FdERiaȝNOzŝ\wwexHOFXAiy}B`wC!zO`'r~w, ze]˞`… /8~DIO%2u.+3 9p6k&EHv"|v-/Z>/=ޮ8vf˔7 `~R]\]3T `gbޏĦUS-R N@HN.= M)>Lq-UHx YC,KQt5]1„hgH=?%Vd}iy5_^ :涕+R gqr/ n}ZsSbUJCfN.l$A[-Ij}֐r%1^hwNxޮ\"7Wc֐;Y飩%wPzP@x*H΂3!ܢ,IоLNTk 6&gy%_8G?i%|I3ZR/XSGl|mIsF|*Ҕ(@%az\˵C^f?'pa4s3s'm0+9W9>Dsb^T!np}+`b(X %(XjpfYUzhajwn9mYcCrϚĦ,'V{^/c@X8xzx7an |F\rLh%Y4<)%}߄!dWZPkr1II/+i8X̖cUi9KZ(vs`oz_vIڂp)&4)ouiҢ1oG_)XG)D jR6?eT@ x'ro\cu?\j"$ ٞt"G~3TޅO?=u?Ve]f 2gXW2Nsj~quȗȥ7ytL}9co:93\/'!/̥*uƞ5\1r,#s:[]yxR V"}E0v*hיdOY!]q4Iy4 &9X,r -cޓO*C&,SXFTýƞeknLھf$x`p㉑\F\顎YF Lk ۻ2ȔsY X6g%g 3{3'.|铴3ewf(ؼ}NLhNb}).I#pޏzya:Sιp܏\ޘ0-.Nqu7K\RtP[nugM,hГ'mKO+>L$k39ӺL./0|AQ]uvD6C}YnVB 0A\0¸ 8*e'Y+tyӆ͍a庞Zm`ȅ ɺ$uJ~WCg=q(9扷w(~䫥ȿ*!@@ 6#I^UXj\`uƠGoϔxw_T3Mln<9o@YKK:_6hn?bf!/|[kEgˠ[HT^R0f?kˑQnJmlIut^\135yYjT[W!K<Ÿ`c-AK[q\ɍy9Krqԯڍ9. =b*wQ? ?oÆ6ۖB`+1n贑v%J;_ ޥ#2Yoh}b-$< r!xCYi%nr%Dro,i !84ݳ86{|g\eL ŜKPTI˒qț7w&;$^+Bmla n6hqAE.W:oeh++;w7M cee`UdTA@jQ0PUx9aRyz#;uzPcƁ&ģӠu*[#k-((™f2ylx%Kxu@S5uۍ;MwVesQFPZfU14psuOw]_~kƌb 䗙+ح[|Jn)[3 ,S^׎u8T/@S*l= nY-=\f58N#%Ii4V6H<{;5t4GJNk&,?Se(wV; 6Q"D3TS+(u(#Q$ge.4F,8}3reYxmZX⌯SL5|SvsgUPD;(QI9-Q`kPLAɑj9OZւZ#Ʈf{Xʖ>g^l|i^Bj.ԉv qtJ+M_>)sd%H1QGfoR=b {_?<5crg &O D% ETx1#0<.}km_5_2Łw9 "\fW ~}D0^ro7V Ko26lBPpRXF%R65ݹtmr"T\~, 5+i[Sʓ.D N`͚̪Z cMC&SSLV= HLVFiiv0Ml|B% ыr:i{<|h4 gƐfR!%wꔈRt\FZAF^8e/SooOԦ(XCܙ(Aڂյ&#Uh¢yܮTnr)VV4[[_[R蜬o( =pn4 IH :j,k?tE3j;%>jT>ʤ}pʊ(Ȭ8\7w ԾO1'EǫE[!*^jޅlFxվҩdjXactJAfZQ&qW rsfSj($iBѺg}h4*^vhwyJ46Wh1 (ʣpw%BSzE=t ( io8 K%*:nֲX,.=XQ ٵ4agDYc,Mh( F́4 BzϷMA/JSj͇f7xf}0r>Dn$O7Hz+g[[/ZENG޳y w]z1L}W>';f`Ϟ>I ^ss4< {I>X0 E)"*E(Ω1FtKk9m! aDъwF/i>^o߹4BH= gtjs,8AT@Zb<#fU~j?2 rڻƒnȻEDŽ6g1]_b98ixf;}嘀Ysq80nѱ/c)a+Cex0/wr՘J\m|H٢X6$d4arfEg|>6%51-H3ҧiw%soǾ }._ q5ݥ tYxmn|,ҵ<]/LL uD/@ B"Rs$P؜SeO, *氉_qUi?6v}&  K. D)"Rb%L-%Tt(Ƴo\Ɂ2"[Cq-=%Վ uۘ9M@ltt\B ,8r(/ "(ҝ ޠ@/RYEQ=P; SAɑj{ +FkC/v&XـL#R@^} 05t8cZz!&]?FwVXEڦFJ&Kv,ohwl)Y$P%Ó,^Z'IȲG=S&5or]&pLs3}rnN;xg߲z/,l{"C(+dS^&Qs^" HAyP IgH8DشпhA;c"TqU (_|xK N9 /ҟwYS_BcZ,Ygp·77 @/|0ҞGِѓ(oAA ̙~aJuK6 K0>ƃݪ>zus_jIG@I X!` R*%b1>zU-uu-Yrb}sMu?j;RKZia+}nPJ s+|g&&stCTـݠߒN]{unVw sa6z$ĺ\[@f. 
,AXNA~<\ӻl!ؐ{ۢ?|٩O r (H ϢJSpˊZ~elIoo7t^{S}77'⦵ԕ}zwo&߳w|x_r߽L?*v<99-q΀H| uěC8jm>w}.粡NuƔ"d0A@SI@L(89CLcEUDQhB/rU=_yJͱS ]};_ x\fBETQJ}OgGn5Pᦈ)JP"cTQZ"TQ 6e#"kwpёczwaҁc.'+:Bw 'w.Hsh39x@st_ϣ~sti綮-e9a1ztS3_'] %>bgIg>n? S |YUjaWY(^X'Iɴ{]sNe tyB-ұ|tw:=yNin^Zi|O}6ا)jG# +vɁ2BLg*Rߩ.Zk*Kv`h?4}7+ ТP :X`-)MiEjXoYZxip\Xwsh Sӈ FR05Ru FDPnbDְ2ޕƑ,0'V.K<|3`\'4ԓj-Mu2]B#L"~Ι؃*Kg‹2RfUɊ\!4z%xo]uVQB"FML£͉LKCeì:ZǞ)P޻ D(,ŒxzY F)fSMFA^EFˎZ쑥Yfdu2=IaW@PB7؇0(o(rR-Yox7k9 J䭫gfhI-O0kQ<[ٕ.Dl !5Ytn97d9:ܬ98) K:1Ԭ%79:r.#胏6}EWERaj)XUM2 ZS`Դ_yEuϕ>*S:\sg k7sy&ggGDX߬6\ޞx}Ha,t{vY{0l>Y^`t`.(_-ˏcf[m3MJaT5"Xn8rtOB8kgᯖ |Cq_]IcOgX_`^ެ,+;s]xE#,Kͫ\rL}C5Ohh@nޱ X;~~8n9qK$G6HfkD  Hװ6W:#Rhnbc"KVj%F0cL @1 O3V(,똩IL HI DX}?:U)[:= t ~9`F,Ol*tN=E:2@[IͲtŒ֘x)Z6w 撤%=7+r"}{1KzI8tlߠa pNK}6;v!@B󻿘8È÷}&"@oZ5QVd095;

SkWI%N>"ɖz9h}DsaD M #fƈ/MD2$4Ƞ>3l#^4 6D[~83)9ScqƒmH.`ә GI=7؆xa1{ͦ߭le߳y6.z(,3fl%?.k h85 N.''/pr8U~~6=Ǯdڦ:Uv2`r6LR B1.l?oQ4x{`…vQB.e7uUwoʳٯse GwޝD4P~ݝ{8d1~郛9͞Xx 5ܗ?5RJ>vJ隉jWbu# :T,,~wRD%V7UI,Pq^-'},ķ8Kލp_%ڋjuV`%XpډgKk |s vDOvaVR2ݟPԘ}GMyWvv[ .YU Nx@sߙ4|_]yϯ|LH\Vwg a)TRlĈR #\vE.͊C6ohTzzb76Ony}g^-9`JWJUaHt0-@BR*-in^  o<}Misz8L} ?q?:%__6н$ؑ^.զ[mL̦:-j:p~ruA݋kQ=i`-e괤CvtJ]ky'x c|$($dBp01t+éF9Wy!2)Ux2W-fA,kgpty*`.,w5x4mweH2(;!l]xevJWx-CfX-b|lw!3,*(^9v2[;v9f{ϔ~'' 3:mxmxa t Z-'Wa7hըmK.9$TqV5F,p 8D3X9-&NTm{G?\\SJ+\@s0}r=UHC) }}x*17ÀBl`59ѬR(FQI,-eXAu3 .hX)<2zS#p175V!a#.)qpa~tsVĎմxo -"NXsJPimԖrBM~v)oZl[ PBy2'֬e`m+T[hd _$pʭo'y漵 kf]kOV( z4RNm0._g,UY&PY&aڡk1^Xcj#^F Bb4`{E> yvי:F3JJG.zHFYF\<\FV]u-Çk: S!oJhЬ+T0s%+,j%Th%7|}F u(*_b!0`aURdB{,E&ߓ < JA}DO\T}UusU;_o^uA/|5ֆii35> ecY Q*Ya&)R5]k()NXྲP$ضzMxtQ(;(pgt(U]tpIKFF6%;4 ,jMU%3WamY)(~D~~?,?|ɷDzix{ݬ z>?{w9RnnߟwF&=׿Ňڍg[6ǿ|}\V#^^)gD/ԭO>#2 2 O[7}?R$I䖀F8Y}ڂ=ӤptFvu5)|;xԸ[1POn@,;_W]Ob5v!3+s?ۏo..aDn ! Nkg]1Ν(B?d_[ ^ݺC}_8~.zq~~r3Jr=e< W޼I7r~-]!Vܘ;}=\j,]l>|bc du3.rh.?Rr_Y|]mrDuWrqWmG"cqڲFj3ayo\azr- Qu!bJdr$N1@^J] ^Yޛ)m= >;NtZ Z-ӗ'b!P>awc! >Þ BhZL+mu^ faAQ&oGsK5rcb7ij]cl"9K\w~0jKׇO7Jl.^w^;.*Z5ܤR_]=\|򱋋ޮ͟ք=U}]OՆ>[2;9?o6{3S}o_% ;ٕX~Xon]4]m+ӿQ@z,)Imq0M4ɦZUK<2.ӥbFψtdNU3KJ tCX 7$Hӱng-ӥbFψts4t1]zCB^fK<$qo7/[է;b W )y/՛ˋoCоג[z-kqYDg $D^Y찭X"j49%!N}n!NQC~ ק?w1ck[+SʉF(.OZ$,P X&R[py%lSOm_MrJJР_vM-ġZd+{n%!$)%oZDϯ$YT2Vν84ad_<=+Y4_H qVvȽ<ܪ'fr:ꟸĔ*_-_>9AxsPRG#{j:PcMOr]ˉh^UnH?Ft|rƗX^ ] ODt1(hR%.E "Js/k1LMخn($u^  Rc+B䢪 S)Htז"&[ׅS4ҭݪ -IE '!%#6εFBݨ[,E%MMch`@1bb<`t@`MZqጄ FM%?@Y$њ} Q }ZN-  S܌L &v %~&N.I+#s-D\Z~ZymjEpv]QN_W#>1C)Pڀ;w@M(:;NRIv W0õ aLUIXd2&pJ~—]P}l=vSɠ [c 9CWq2(ml |`PFyW}=WfoO~cn=˛M[[~|{?±Ig,`7uy%uǕxWP^ \`q2P@0Hi&cA$)%SO _qRr(Ier,MRv H*=\#@)5 a{[ aӎ$A:4jZ R9ii2 k@\az>|$7~q1z$[q ~2CaHГ -YG-AIԨzˡ{4o!fri,%xP@>#d>n26KI,R2pM)*N#<]*!fO7_+&tBn y&dS.>=t&GUk3؄g\il ޡlX 76JevbgsQZOvz&o}Sk3]2Sxs 0NwRx)n LUHUeRUR Gm8 1rnzR ㆨEe'L*Sv˯Aut!*w凫5r[u?y*K NsJFidNRXJ&CpaU&`A2oD2"[Yhs!vX`dBJZJF+gP0BIe8ʙ]"y.@+7wC~1,Oh$xyC)~!i3cJyȣ0C[/,}&{%A+^~ /ıw_8"=EF.x׽J+~!O_Az0ourgZbK]_^1}{Z^9u/NݷwΛ;oik\S5FFҺJ4VBЈ"rj.I2zi~X ~?[u. uVnOP[ Bכ~qE=Ё֬}+<ղ󙠍*ͥQa i+EUZ5ۦ2KT%F,ZZ4yIM)JvDA Hl/xv${ Ibxp.haI>kdQ . e#-d!ЙD)}k5XUMT3Z}e.`Yu(غ8AaQv.tR(Bψ†HYc-HvQG$>chONYk AZFiXAoY:k!+G9G. 栰u)hi:t(-bg0c t̕cލ@?#1*~RiF5JV}Q l`e!=[F&3F*v<cŽ|{ԕ̀Žg+1D0#pJ"-D VO >|(in2p2Ce]OMNN\0 wqG_Y츫CqKBd]~!S"L4w%9mtvdgկ s5!PGH N$cZ+Xhf3b HgrňY*>G#V\yӣ{qI*c?]"sp+=~cO!OꂑJUz";=ʌ]kk7Xf;)(B0L)P`O)Xl)&HIy"-4=P4M!Ŷ{g`ki.俓Ue TD)P0Zh3H Qj3j<0+HMNTb&TC8T(bPH5ZW#`~#}Ί PEL*[zfrn֔L2XRR#5T#FjqZ6H HNkrOJ.a9=P LPNAGIt>E7ӾzeZ?͛_6ӂe՝sǯmwa'_s֔s(e1eYM^UCa\7+J8wʝ{{-u~lW7]MȻªwcY5Lm5Dp|Np N9=̪z]/ۊ)嶐"))N WaK:)\2$H餺N[]ܲ ѩ)UWRtj4FL^I]):uJ1P+ sgXf"=^wJƈ"P'adviVAǞK=Tpr%N!ނYNz/bWwZ_Cll=߸Pb63-lҸes2h/7Š=hAP{\R:ECaS <ӽWM cL gvj8A>(3tGTST? 9y@1ԩ7S\1MFM7U2yXNᲩnD]P̍TP9WXMS *IdZc c&ܠ Fr ߭)(dK2hLe)KVVH\5ٷ*.Υ"l~;n9Rϳum N^}eSg?T"ݯ:,U[v-1:cu_ssu͊)ua@4^jԢ\R6 @Yo;ϊӜ*F+ޛxL #HɣsD"cAǑxkrg˸uS\Q)JVXY\EQ5%RV 9 na* ne{v``{8/7Ӥ;.7 v)`kBl%p//ŏEYu:/moz 䚣]=y} N;q,w'1VWἻ]@\/%;THDFf(&΁]EI/2Lj:fmu=kDv퍌!y $L ;ThiW0MMӷzzP/F >)Q-Bni40js:uy?r8S]_@< ͫc¤& hGHm =PDcݸmbGFء Ȋ.5F҉TCU͚5?.3mhR=|ebBt:(2Ǧ@_zR '112U8gog:8ZFË30A[YvfZFWZ&'11BL~%L-'bN0НɎT<3]y!S3tƈ/&WqYs@F Q :-o4~q ) Mbr O-D0aZ P@Ns+b,6xÀscOXtƘKGYֶ.].˥ucח97D$ )>-7!5oݒy-p.X)y5WSށ?d#1ʃ鿅Tp#n3ԩ]):)t%Eꓻ[HNی蔔*H4qGFډi#]q߹\{~r%0 +Սk\7嵍p%sj7\GSE@)3D_4AI$8χ VίY#η}OsxV;EZ&zn&Oϭs֋Hᅋ_F(u$@QUhSR)sōd\a,Ui**i#2Զ#je-+ 11SAo$)(' 4QLR{zLܾkEs2;Tg&uk!0?S;k^ʷr(.l-= ӂI@*߭vRcg4nuZ<#sg(w?QzBM<̵$|J@Obf^^Di7h@n'X-W1&C}&{:EJd3AMȱ!b~o_/y= o/i|)s0]Y:+n/F@jX(zݸܶz2 J~B`caAG&@sh i Wc&? 
2, Xͺ/?YlGjamSwӁ뀊qwn4@},XC *!=oՉl6 M>Gh.rĹk=7yH)9Ͽ4Mt`)Es͗;.ZJ a% f0/2saJ3/o$꨹ѼէoUYOЌxH--&T|X7uvcblʨP?#H0^q \SNw@?JIyx?iP_/ݍ*`DچTCz |$nD*T1$YIlꆜ W#,jV**ee+If;`Gh4; PLh;p-v浞߱("'w:ߴ I)i}:;1.z24GۙQ2Wr^\,W!7׵87˫_잆Ċ.w\7/1Ah_fl=E;F!U}1]̴($ &d"MB2Hl #N}//WIHn^z2Ry]kY d*j3J;5Bsc/,]`2WT4θS)s=wq=_IiMcj:5Et9ʡa]6,zu[a&d$mVyD[s; MQ|"G[_5t59k (JW@v,GVhJ,Qm3 ;N!2bh?;N1C *'Kqڛ߰03`~]8MaHa튇N$?T6և$ `2EL(B֓ {ާZkVZw;2l:<+1)Z' @rJ$*pkFf,tb:Е(N.1s}-wacWM?Mj~|xpf +Wm|[p[(8zd@W!ޟGկ4]5(>bKMkݟu`fVB6}h|bl?^k[wGAWOb$hĘa:( ¤Tee 3?hZc^Í(WH=~N9iΔ\hDUc.y˲&oLJ(Xs %1&*R @9z;3!3izSw

R`XZ X?|rn+ru9B6SN\n&lrWEZL:!*Nx&y|ͫqoUy;*}%K؃Xu}2ug:PGn N, }#p@x5|N;7h#Pf\Gؔ#sVOO ?anOKNܢ۰Fq'jlި9c5/۶;&{X]8|+w@ ݇Аo\EQ:ŎUydn])%&jkPmOm] -(FDߢ=z,%9:K䇑!itt4h !߸)fe*!>W6je>,]j9 G`" W? E6@,z*jŲA|<5'-_QBs4j )8y2Hs4kInCx,%G!C. ٍj>9.[r'=1[nlGsuQRG[p|K9YAnw]v5Mӡ5 O.ů h/_Pٟ5ΖZ_@hmV5>ig}ɎNE4CS:bjG|!TI!iu뚳S9 #?L&LSϋ$ُnnf7~Kea`1tL\F\ ^#pUjۮNrP+Ow}`K [1@ɨYV>frRqx| a(VOVs0Gj~ 3 2O_&jKʋA/;n,ҽW`jR:L|^d{S*#}-RZ湑`8) }L}%§}'`x(h*oAHzڨ3&V5ӣpU1zqr=tL\|K'j zqG3hYk5s'`Lq&}GVl=Zk5:%~QX\4:|>DJ. Y][5mSrp{M9Kn23iޭ/Tfhʽp&}jt㭭f0L0"?j $coQ3f !FA9vrl}&NG@rzn,QU9?7{nhġ\Os&$ c\R [[+}ݭa*M=7k.W)357-v][i".٦`=D k%{}#dkUx 6:LwݍCftݘ9W5\=o;A*w(^ ;y ߞx}{+  "@vt[ә}[աA-m֣ dُ轭ި8B+ZH+%z2ba KK2דRv*BCqEq[f0Q#,.^Dzk|鏡BCqH'҉fc>QkO$Ngi|n0>;|*@9`h$٪sJEGKX%2hD=Hyb?)5 j;rxb>lF7|f q=FU4[%'F3}O O޽zq raT@T^ .*|v,O~<>D.oM) *%I R:QrdR5 jUُ/:3*RY e6['HoWy.Q8 7~Da!b8,oWƟPx\{8c &"Ѫ"гogh\\i\"`mm?.?][Q gjV^4CA<^a;\“!'o46 Ys2< w> ;ʶ:tC1#L}L 0DYO@S2"I2?|8)y=.|a(]Z'`Px2 `(sClyZ^gCVePY^-QPX܇x$s|>*P܁IW(<~:Y'XCMn $nZ?TNP&@?qtVi.Ua+uLRqBqL䄵lV/H^ST-52º&,V窓`*y>2$pdhI!]sUwͪ7}&e Ծ GmZAb9V)wm#In1nlƭy1qⵓA@ʼn$rDI7wCeGqvXG|կQsU_Hsyd}b\/ ?K$NL)%`c á;d܋ ]{" X* L*jK٭w-8Զ=ZA$2_NZf*[N~MuD;K+T.ЊLmPfiyKEZ@d]m v(  y "s#Ɖ㏯M2Bf趹W묜ڮjFoLK3bb+ʖ-r9$Ċ)9o;oxYbͯFb&Dft]wz6ZҭJM~[l1Ma 3د-yҸ /|f@@vg#]CEͬ<tUV}bm_q gÝr+/@WٌWzi !'^oFx[=rӾ6 K(`HoSDV$\r.o8~Ѹgr+ A1,TJ1eBif#15,ҊOS[z!!drgxVܦfض-Vq^z{Y-MKX';_$zgdCI¼X28XߵѪlo`f8k=LmNGH\ۗ|~S%o[4 zԚ/l[r!϶ɀzsW,mad7lza77<3hJvR%iJ%%PrB T1Z@dPbbTh*`#5Wl*0{u''l-1ՠNj Wt(@`R ڑ@Ts@>XW`I>Nwzz}U_Y}hW7$414KYSk0FqɔP}z?%6X;=k0N&L#ZaZ>OkV ǿ<>_ylEazS YR#A008WU82*iI*>t{&@d# D8n^{볓8˫wgůWg/:=e6HG`&V:`Vx~?gx湼+Gw3@-씥[?xJW4놃Fi03" 'ax]I0Jt)'UpI:@حJCkI|@j@1/P$3Gs%0YoO0пm0ht ZƘAB}# $4BfsҮF "NI#dmjc9[3ke255NLvHMeV)zx5 SA賩 e5(-@o]M ?a9'݉4# ֙hbhډhbu" sw! st% t* »u: P:υû&*0tx0uC\֥|. @^v+< ڈ{݋璐N ٭X|nrx{\[=,^WBϹtC%l-F6KHgȗgֵ/FD49m}K|K1 [s E3p9GFBVl@OXt0JGF;M 8 hX#OYf?jnrjp=`wb-Ŏiڀ98σٝ DG5(/B.ɠO,HfpNr礮)Â~Oz}Q'ѻ sghˍJ6bmUC$.T]H@'~%x 23odV#v#l"lGZ Ώ33??;Z1A4MBJ,9} +pFjk[F#&caurk`z><̋KoXu>JOS9[8ga K0bKz| 84!F`ÙAq^q~wxrmJ׻C`Qwbjr8ieiY 9~V I0կbuewXL{IǏ=t^%j4=T)k2\0ήέ%',7[74s("W63_LaP3f̸y w* %Юrz+mB(^3b /C7V`hDlҡ BQ,2 ̵ \P6)qג?Ii+RלF73J538nhWO|bM)v؟Oan @*|jֿ3ww`aWX ,0߇ ac.KA'JҴp5Vq’Q |{G<!JjaMN~$}E= ,]‹ŲVE٢;Ewg>-'[t{Ew/G'ݷ}vv}wmΛk9Ocٓ/g-w-wrƖ'[[l-[lwb{6l⻳şߓ-XgK؝-4Ğl :[rlɧ%dKn孳;[d`Kv`KΖz[jOԷZӘcR[GieZdc/Gߢa` I\H>MOd 'Te340Gbn7) ^/,e<ZR 74[o!tqte oާOlȾi?OOt:i<{W_Ag̭@6KzLzݸX0"tؠbj YPƅ+^MgX1UsRCr@ՈXƋ~Y?Cttg -W^K'ר=g gù5r@ s" g_kDir5{~P54|_D5z];/3UC]\(9Rh%0-Elm\?UåK"H};<fmU̐0XKKRX;5ʊ[]/[|뷣01vH"4|['>9|!@!dlOY01r9HDk ќ/?q!&cמi[ZL5&` W ?N1۔B~¯gA$Q"^8H&ߎp!(m|>@cf1]X6N>`5F/En)>"TZ} {d\ãL[9. - =`E9)6J$$ #JS˵P g7T^k/EO+_$ ݠ\9GU4Bf@㫙KXSV6^$ hD]Ѥ[اuVG'Q[J@YOzz(bK[h$7+l"DFsBݴni:#n],8ų3y䁖:ԶЛHP<(}clGnIͅ«Xw xdvcIP'4?^{qo FFeh;l+P3LqtN&~r΁0ByPM]1Ʃ]~Seǭj^*jxoVW }n:okٵՕFq\=8͕7+HҽΛcQ-okqy篻Q[nbZůG+7slG|ú:.I7] K^w}zd#}W_r-KM_ @UbDm1%Û_OQ7p]0^vQYugg$Pz|qۥTMz[$__Ek6σT쳓;~ec/D_G: -j20Eg[2d70 crA`OY>f9w2a`]׫A}OqlP7z4gYMts? _uT` vO7'&诺JG˿V-nZ^be.j#+3잤"C_PJԔPg?pv⮮xN^yK^E̿l.ks=ݗluF`c{wy5o//~o.\^oſo@:]t.ԥM}Tʗ_. z%|bMTDn}Me#갚ۏcG .0QSu#koA[Uwlܵ~_m@S|{V% ՒVZK_v+siDii6` 'VE>bo#3j1Zƪ\? v2zR>c<i!^OOlKX>_a(mmˉD@G; pf5IℇE(q̈tČw۫&Xca|JFGt*DT.pu%M ,sc# >@a(>9$k(+rqDn\x gRPOtYiuil _W883.%Hr:0 ClSPP"71&>~jȑS}'N/Ꞥ>l~(vi6CeLN. 
;AG[iSS,bEJE@}y",kg-W!CZI1!F,2:d6aÐSA}奙!t|C+s ) $qd=2`FP&Ͷ&hI@K -.e1d5,[{5V0+0At8*ɰXg8bȐq@=*{1q[!qf -Dr)""i[ciF %r' ![KvFE,vN|M41Y<DE!fE 16A/1p])0I,H[A(ɴ JGY^M,vn#S$2ށ'2 b 2|?`t~ j,#T*(@0S$d|=Aateھ7rɃNުi4!ZfYe;DP,#,&2y+pS3gAZ!bA?O]2&eq#RC qAًuYX .@5i]&Xբd4Mf N% c\V1H^ܑڕ&!>XyJp  e!LǏJG8ȯ],'%u^bMvqO*`8j]FiݡB*~v=`F@?eQ&̨(?;y;(nU,{&_89W2Ѿފ2f ^SkDiO -/FḶ$N:ཱྀkK |J`&[pqMB 0sI:e2%S)Ls>:GDJ6RM%8`R!-@A#$2 P:txmҌϗg"m *(+N-K6Ɛi/ u)"C`DPSQdO*RCOLPS 9%Y'勆a+ %rg6bNSCe< ;Alޫb[k?+5Dw[`sO_$tBhǯ3)$Q."k>/)0ck26c㋲{W,Qp49x-Pjk.&XZHTKI8YdU|TP0W2{j![N ]&xySs T"ARItahaU A7/X2jϮ2إ)>.`)Ư<'Z'H)#Y7SLS, ['@P:CWM@gdQK R\O ^y1)2 ;bd$XeFu !(QU1#b(Aݽm bCl]7lMy_9G|V'Ca<@JUY] { xN( Waօ+Z8լ~qUF*)dq~joͯL5 ƪ՜֤('tWDrIIL$dW21/~K}D)zIֵ8CHbYY_Ӕsq% %~̶QVRs2E99Kn`'f,E: AZuBL#1 v<,0O+5FBMR"/߿/-ڤa:l#ö]L2&E1dk#b ƀ#MiG#FEt`X̦UC;թ ḭA;9S縱@L-3o4GiXu~S?eݤ;IVshRGZ#Ύ#fÔ&u(Y%b 82rN,QDtƩJ#O˅^{,Sݝ n!H^&NU~s,?ve ,e4>Qz+p>|N4S[v`L&yN Z˃)Ǯl.*Flt^@>wcSlxjx33ئqB kC6T)WUrp**R"kJI޵O7 b|vMg3)4W|D4kh=bʧhV`F4.tbpAfS,GVM4.H٘~Szijcu  "22/}5M`Twr')ooQAIe 8AY֜ZCa&8>_ƅT=붇6(j^cRhSc}/&z}֪ROIqZFb-^/'d,1dvVsiiNk@`iV11q I]-hG#Bm5SCuR4<=Npa5 "crMlF}GI1OP0r`->ԮvFgCpXNpvQ;uGu 9A41*a'd0fv!Ir&Cd2q<&yYZwwݤo'8#1&MPU=A(ŧZo%7=8qj%=+`'i# 3cb:0dfnq%S5.HBFcH4Ԗr95 ܇јfM'8}ǠZA(syY ,Atlv>3ccx3ZL0Ǚ_.)$/Z`SJ&33o`%YhgwzgL94[mӺ ;20OL9KymAo 9|1F2In/'%S1 2ܬƭZg:(-~k:]:o'17x;jaX1IDCsCu-_+˾='.e\מVOqĕyC[yOȹ M,&`0Kj0z̃9 Kk5~0-dZOpQ}(f f0nhwBغ TSG΁s׊F^%44+$,ggwi0EԿkTI NqiU/(l21,J2tċ&6ұpLN;|Cs^.70gh@ /N0*}jz%^}*t n 2G; -N B O{[̱iP3{Us7[oplWa6"guIQ\"q\ ?LqcDU^ b0Xfq]o7Wn]T6FHrAvCNE{8#ȣhlA{$G;缿iDRݹ+# v-ԭ8p-H-CdPub Hp^~R9v79)bcS%3LX/u)CXf|-ܑe8l!&#=I,v,cƪ%h*Ԡ=d hKAv)u)StƻP͍UVi72C6-`X_!4n|FB.9Y͋|Kwxg<5KNkau82 q-PZ1JKAўMI#Xvnb(bJbFZGؔ]*{2q#XRS cO(p9`B溹r 4vYaScvxS+,tzPsP2#\0śZP8+\D 6Jr/\I5iyOp3'htUZZJ4%^UTڬJ>4;ϯ&A#@PruE34s)V-J%n? ?4e IdF Ue$TJ**-KC3yCFA+wuQ*JŬ;,+9sWa,蜿"b*54kdgܵڨ}kX[~7żw]#}Qs|| MNCȶPnSnY/2d= UL=9A{dS ?=MFLB)2l$杫Ueύ`YmLY%CSTXj޹\A_U5{6b x;eئ0KH 'f5XWLR.v)5-n'9gS7n<كs DjzYBJ|*-5Ay?&?O\VV?>c螄?< xM> |pjttd"&CߓJl@n/E_]MBN&0nxi74ͧU2.Na{O @Qb{Ĕ2wwlE/>)?rqfv}߆\lpS=5j\ɇ7clj+!}e iRع.%Bpշa=!_uM0z v"?޸q?6Fzݫ*]~=Of j TGﺘgdGUKL/W90pCQ; 7^~`m7Ҩ2<Ƽ`&@W'jQkxw> AZ\ ~? >b4-+u9jY0az(*\$r$ӫ7> 6_'t: )CU&NΓ "y2Q ZS)csz$JBR2NBM?`x8Z.4@5I3vOs.N^au$,2cE/Co,>N1D2Y95Ldfj?ί{Qw/wWKvYfp?w@ kXaVr M73}\*!cQDzˏf ZH1H2 i.5? d$vsb 0)?pG%<1z>s)7Zq$(x.V?H%k6Yeb >ʳKv?y5a>s (GA!p\)gQhz.^:gS1)ԏ~X0̕3棁{\IދWl\W*}y:[+ 4Aަ2o7St6̂ΖF>O-ӟgiڽ<^*t.qَ wuz l|677hotw3#0N(@"t䁸G#,h ld%m-p`4Y<p*"g°/VwЀ?2z nϊc]9M/0ګZ87f. GFcž-ۏ@/_$ܸ8 a |r|\6f_۔Sc%o/"geÕ'R-wdqol~| 8G iݷI}k4  WK!88F)2E)r3̏+z}{ǭk(ͱzVdPC 񬀸q#pJaCSb RH0 cx1>x\dwҲ Q+fQxQN&pQ̅`[3 Vu?M~rMPS9n Uu4!)Lџ#ڏ$wi6tg6H$lPAA:BDƭ@HjSUijjܧ 7Π{vAAJLXf֬u@I¸߅;?z#q+Q 22)'x&Dw랋UǷQiu Uu* uiG\)чu'ۣ8` 2%IywbDAZIK#}5c)FƆ2 ʠ-:9/뼱]0ebd\꼑F8@vg¨Q롬ɏLnL.yИ&Җ)թ11 TdR1Λ0":e"cA[~;n aa".m]TOaqx{7M&k34- dQ a!;QKApž^w Jqb KljcZ o|Xp$ W8; οqzq$Dy.n n:NB?fa?"u9d[ɡ@aj%PкG n{wM>*U% ~ٲ]mx99j+u| =zCͥR}F:d[F⍣Sd}vZJXS>e[v|(O۫#.̀"'/#HC#m$ ij#šRO h;~#~4>}1rx8醆ه^y l  ?38~jyTnXW;}릿ʄ.{vȖ@F~|BΣ;8QĶX1V}OF~> G9`rwׂO[-DI7'ϮjöУqd#5}wޙT Wʀ-Ce|1bJSB7VAѿGPs^LOW_>9J ( %N'u?xRAv8wbQJ]v+ecv JP>Jy* ewt?ÐHV 3Nvt * 84 eZL\i`$r<=39V)RK]jZ*n%`d-E ]+ό՚u;8 ;l;+0mwx#-9>tY Ym6M:,ZR% ɄCzI#L"NOV:s H[5vS ~6ŝ;rݤMfS%U6"NݶFN5WLLPƙ?Q͹% RKa˕LjnGN<3t);81wA o7St6̆gK_#Dy@=I }7޺k`!L48~5_x*m!>bfa ݢ-O `+;_"V)s_,4+7xAx:@f)`{0l F.N:x2[k8b)estqRNoM;%0ʥ? .[;ƫSU+DA*,Mȿ_wgE;>}DfF>EWԕj*2:~||W?|s>7箯-(MO`wdۓ`'_,շmxxRI{g (Z{>ЖvSͺ!J7uȭ+Yw;U H3;:ڗZuݢu]D2|"5T[d)!P`MGGY(˰İV&1:Nߊ$a:l C(ƋLva)qَ *c*-<hnF6.{-KNƊUv1)#ּ##ݤ#)R(`'u$U}$4J" 2?? 
p!\=Xȧ\Sk3핐6͜ty*b(#μOƱh/ŒAu^Sˤ.A`~ ]P[C VuFqBuiPQLsi-|fY*ʘqSkBHr9™Ko1PhBb lKB*O2M9ɼA:;L.epTTHB:u3c8󙰠"`1É7f$U[S3R3 tL*Ad.JtAkXk(!_#k9F֊` 2{ửg|YReDT)[M=w$ PXxJ_ΰ#wshO|G{FmNT=I$Cc":D 0^h(퐗S""l CȨl~0ePJ01)ψKS[,ΉKm5aR,  (݋BLt)ֺK˦E)cD#[hb =\Aa|#c9h}^ё<>%9,:Xy{)U#d3 f`ik:FGv}jIj[R`8N_iTQAS]DZ,*Gvidjrdr!*J2MS=8"igѐd G1\#vEeFVQh4R/ 'a ,2'Rj'e)K~FS+P*klNܒ3}ɯȪ 2X-PYH%B$$sc$4jP*ұJpU$ ;;[>ȕHrfe`@5HGByA VTYNuIHkU$ JbB(kt.urՏ>n`+kRQU_rom–",ʲ)$Y6i>ӪCr)9eRW :y'dMQ,Hk~)7eč @9{3o|iA0(& $VLt`PC5̦'xuT&V8?Z=?|E6j\Z30=U~X{_8]ķD` 0dZ*ijr9wkZ.~7ETƨU?]}R 6g,<ׅR0̳ HUVW,Z|F-r-qVGv{XFF:B2.^{UBu4e( yKȪ "o2AWl%IY#KMU5|masO}-~y~sB;xexh kEa:`J4$ ˀGª6,c̠VGZ S#a"3`g5 Z J9[Հhq_iz(ZVMF:Dxe%2Ne8]̻m)`kkԅ4VL 9Y]SQTOgLj){Qչo*h |6ނ7:2 K|]&v.PLT4$W,'RE]ckn8T٢p|WȂX;V&$JJZTEOT*sq0Qm?L-Ijإ./_J}MC:mȓ~\+f;j(FgSSa+Wg<_|O?^;tzT`,`-bU']ݾ{Ts4N{ 4FQ7WwHRSMrL҈Nѩ_|?4IsluΩ*XW 2U5)sYZ?NZN4XyUU2v{۪zI`h@(Vɫn((yj@ICZ `(JQnf0#\=\KYI:yOCKDܺ] .#,f͏N2: "X~\&T@+!f J 8Y߮P XJ L#mE̦( &/d1 I 7= 5vX)5y\>}D 3F4xːL>Òq"KTwۗoʦDPeCqW bcެqpmX(Xոg4q ?Gm,SU(ye6xAkV/[/7Kx+ͿU)_x{,xfLhk?ٟ͟E1fzr}'??Uuu^՗Nʹݿ]S1o9gl/˪,~|+]MӳCn^9w52oHiSe_1?1l̞P{+JkM-C[~+(w} w~}myΏ9 F?eUټSuv{C{' ge&b}MS!(5'czO?.O>tSN 5+otT.QHEBnvV$n;8 8&+)1ƀ6"X1Xat# QS.tU5= ?.ޙ-o~],˟hNңt7i~ȧ\wI~ҵyB ]4#E/%u+MG#kAL,(! ]D5xG 9dLBiEĈ1mtA!. tpI E@͚lkěi~qxw`8Mڴn ,'m;.5vsB`8D/hS?b H87i=柌*%$OFw`~\%?eXz]NIݮn?Lo[[K{M91,y)?k%Q+ő"o><]ןq5ƦE3yZF_{i?Nʝλs;;_;{i4h_~۹|qYϳӓǧW̊F\5^*f#M+X&?E/>]hvX}f܉+owޖcDžB#v]+]9}O6],>:`gp~.>]^?? svrJnaΐfE]t 21~:( (/A^qY|4`~ xps_OMXثNʳS>I.}~la[Iӌ5b:}ی`Y#aA5^n5&Q9hSHYj0WEkA`8j5 M'.UQ@%5Onu gHF=,d|O[s>ǁ=+!=t`Lʇ흝~:*>w>f751ҁܗ`j@50o9o^G^KϘN65r6PWEU7ʮƏ*y?|sr]X8?;m)'U.5[6JVEak8R&HBW %.sbfŽ+ZE^1hK7:s'zn#m?>JCA.!n 3:YdjɏǗL,("13 U>"HΦQh5#&P΁?bsnP H_gQUM#(zc9ZEhGuxgXsh<>O-R聳RZe3m*R**B((/)4Z5deteTޏPxDr}jNH_d`EWbH˘s^I#Sy(u^ z*rp<Jv8ÇTTq Lr%1p.8+]bq['GF].k);ZoWE1Z)>Xyx} e^i%Ս!jjpp*C]fI/?$X6,K^!f?\&a3 20M6%;٣S ۔6Oa.P }R uσim.kpc_ ݸe+~A[DWe\'vR@BK_X|!1DQ0v HZ&5PG5/8@k/'ƍ)&BSbؑgB`4 F'ō3-bX($]IdM=k9>NGb`4ь/U:bXl)9jqHf'S`@ 5cG v/8N L,heMtiQKC`4%6xF5j 4);yc .QS&5I1L cOx0o1Ο*0D5e q+|W>PVƞ@/S}T^GT Ќ}uOn LW/NφɌIX`=ݪ4f܌X9LkF2)93R:EɌ1O>#`uT'N}-^vUǪEzqNT?= &(XH`db˔Ή oo9xNub- Q'8;TR5% QK}V|r]w}ɛGR.aV3?EBF9-zS[GG*4yV챦{NQ{i3(@] `*hY|thbGXmS2MAE8']wbXA9_oW5kؗOPǽ Obb Sk+R;Ywc8Y"ᚴIqh0sGLq1O$cWcu(NcuS:Mtecp! {K󮋎gg?FM?OL^>3&V܈&ڍ br? *1A/oÅ%k┢CMivK ?ԊNsS/}Oӝݶ]jVk9o.p۝u^ؚ.xa8aYk;멻Au{~_h=R6p*Gjy 9ԭY$,zv?6c?PSЖ޼iιzGq\'c% 70$j`~Qlj)}Q01u꿒kh")%@nE{q~)Pkj;vQ̼h< =舢ȇd9k۞h ja-&mFtEkc<;4}uzdfCwm9 8Mǣ#Zr^4,lK{rs |m9TSj`Z+HkrFRTSdG:rr\R7cWrQPT3 T\}^c l)2{81h̨6{b^yb% VV%b=JCT0CՇ()A}X}Q`w&tF9*=|lw3EEMq3)ǻ,j4*Yer &~NIH?evMH¦c'!;YggbQ~Oz1jtlq"n2S"xQ}FCfa6CσeuQ%܈ba]S`1rR:]o,)kuULG V jCX(d[a`Y(aP}R]+գ8[)y1λ͕ pmmid7WNxos`zNc8,D'tSWThcS&LFbt99!] {Π[#7teٗhk=$,r,OM9 vqM v` v7PEЕ?Wzؾ|:.zA&PHxE=BDl&U]JX݈X3bjhJ&T se/USkrK ].H*9ZS1ѥ=ǯy< pg[YH1j.'dbN2Lߞ3}5B3}KXILAHWyk+ =לc{gzo{;Xza +0F{X~mX)D,05, .SnX^יay%ZwW&DRX^N9Ub?O  g@g`153Lq 3}P%9)aCuN8Vvtp s~Pr~>~7ttqnAfh nWk7Y1C}"z8X#/I3{I*k5\u$ Zvt#لk9ߏ5B R5TX&*9~T* כo&\k ՄΩղ%/BOye5zf"RA2sTi`XΩ n"Be^/_TK:k`F# c 2\k @5rÄi_N; Qb˄Q F¨^(FAih| {,{;]"f DJH#6^WB=\Mbaf8x}r@{PB hn@B rFBV Ov;-a4U*!x%D:woU& L q:׼ba ̌G:V(j6T]2.;renTڜ|j}> dXd%4^uUk4U* UX)6CW+]6":Uޢ*P(}'pl| /y*;.җ.}җͥ/8=v1w k鿾oy-DZsfig~Gq7d2xO.gz侟ve3$ONd}sE` ~ɣݮnnW6߸ zL2tIzL4RI | kOd ]`nN*uN:鐪wRu{:v8@a0nI{X|TƓ'ty248Өf[f$_x6tO/E/?V]4PxVPߺw'aƩ!a5 .'CIKK}C..2`B?Ȩ o_B?说. EPEqt9碫48D3D^+EuAxsO։ ʞeD,FI{Y 䬺5PgiFBKd w8l|r4~ʖO^G?oҕXf^b9ΰ){.W]j$F+V+&^ d?c&x]<(Hݦ9l_믺j鯬Pe7.BU]YZ? 
Xx͏)=+%b1XMQ)=`*UM)r]JOa0VJON9(;+Z7İlڜ<~|gc`b$ٿ_N []hnnS%LӻP]qMJu3 ژ WqqL#x~4aOWyi'_>$gIϞ|}V!̧2>ęg>,) ŕgXY0Y\/pvkW䯇_⫫m:c\lt.;[g[+;}"ɘ_/d%IߗgN"F6Gu'@>ƿ")6ĆޥGN\^\fOVeiiz.Z+z;~j' t}ІXcG7˴]Fow_=ٮA(mMcهơg':_Q0 Fߖ?#AxR_/:|}ba3{#6}lr ũfKEd떉ilF Wr:̘-q3Bȕ'n;8o_^d-V˄BFBG "'S3iX0K ӄ~/[~mL.<]pH2abC2 Jw,7\z9oᴘ~NyH?eo/J qLOȏ[/{NzCA'wǰU׳jݘøK! $wt :rͨn|Рvt(Mŀ:=2JNc zWޕ^WVboAXJX;_9Ý.!]]. +_tpG*8}.gYc-K)J@M굑:HX Z;>ܻn0󦩬|rƀoB(6jD1N.J8hptI ;e 3pCwc抨.YFUz֢g-zbY f-d-i1$IvDsŜHJJU0%wsucNJn?.%Z D萛j=Xrm6RnvL=sorr[nLu7vwxo&FٛӴÇH$W|Oav&mN{c VXdoahjd [ KT`F}oFCtpU!: D MTzZtbl;+.%ҥ.]ҥ=KPdgu3 $x]n:]R=F3DO1P嬮|fu-O؂;|-`[N;]R $*BuI0MpCݦ 6a򀽩4..T$b Sh`}2uW:܁0e@9|V mK{-΀}|48ÍXlq =^S0f~m+"u7a*ШᐴT,eǷ~.Ts+Q䧧{`,9c@_{DLca*#!)ԾSY)Kufh#."0Ld =J,|5^3_6/eaCst } .L囫BWN.眧* *%`ce 0sǗNJ7DZ0ӭG&`җ3 h:}\OWd(1!է;kz>>{|ipo?Y%NW>i郹eJ!ޛ0?Ipƻg[7f}$'@zd҇ϓtP<{8INO^aX.w^/eL֞xe͓i_.s&6t&Km@;vi'لPX Շi`+{Y*%G{Wݍ,рS')I5IľI,YwmI<}MAaXu(ݤ\ XdHH46d(&-J{̑gԥ³`# ~pmk\-{eێw@I^$…+9K]b k8u3<|ى7uZ].V.o^0dl|O`[*c[xz~mMWO{v`8:VOj[ե:e?ǀ9giPDnyW3B1T?i^}gȣfcnGS{"\X>>G`wdylZGHsI7l=U;g}`=î6rkDzpAOo8zHWONƲ:|r.1RFrjSW3ħS)ĴTFo>s{4%̓?]O 7o_488c{lig#; :i臖YLJ7'1fE7ґ]]q)Jh8H.e`$~QQMҰ3Q9xj>>YK$ubʇd<J4C%?t=YBx }rd*_; TF+Ps]-o+f8^oJӸx`<ŷt]o}u/뭟zܼe^U/yWU+jJ6Vi@Yxf6^0m9c{vqӀ=q'5C{&ˡuo) csJN]ck0{ti 獜w'+ @=xR>>Ra*]rT:IC?]#cʝcrk%'췍u8~+ҨFz$Lݫا#5Wmޚ 즺)>4(}Ivz~ HYt٧AnI]D@AqB>r:".1Cm?h0Y`|LL2_o<) hA/>LV0!|ʋ АPLqQ)A5Iylxj\ٞ?Nĥ~w =+r xX<ZLȚe{ܝx;<|W>6\zSr|?5`/o^<`W[pP-a8=v?% `a{|l7Nfבݑw(;;+o}&몡%>o 14}v=YCO:ma<)HD5 4 -sJqVfQ٠E%l^0A^0X]Z=qVDor>ē{Trg[,/LmΦ_eivʧ[fE[GvȲJL7^|b~K[>;-xzK}3+|̜Cώ9xd Y<-Z[~6A)%`"YAe;Q̃kX@ " Ii))Uz2D_^M?rR}#Mo\x|>yH.9ǹ A-4!Hg AFiNx4d_1y(|~腶Q8[at:<%&1o!ǡ{K+M۵g&7Yd g_VIKσu, kVyFqj%J*}Rn0Y$?.L'qx>yɝxY'iBDAf:b(QX>X~U$pDAQ[͉Z{Sޤ8;cWvo/ l.*-){oH\\r {ctde\UU NbI@VΑ9z9w>úHC{Ẽy> *Q0[yͻ&"*)ylpX0!?b,sfYQ,@DyKKhH ~eō($6B(Waċ3,gW "JؑТcQAq&-|ĆWIظb~E^oγ`OҾ`=ȕ, ZFh  nO2bWfth7O;/H1m5ͫ3 ^8 NJtढ@I^Unq 3}_o(q$lT*~h2!߄4_&4w;h=[9|6y"|_ZC rےOsω`Py'!if8ad^-:" bgoi2\o@sQN($y|nCHAЈpx7ѩ&~sgp%+4+|:BqMmŢn*f'8D!y^I$ f'"OA(^(_D8(QCj.7t*Owa4VE\gnjHn zD Ҁb|T$;p͛+=9 LCkXjTQ# ox$A8bf= qxuH)5fKEAtyqx2p)X=`-As-N{LvFOy״  _P:i$j1tf'GhP;Qd!<rMmkЇCА l33Amsf'"j"^X"%-Sy^]ʢ<a8LA^v`^1Q=-Qnu$hjaM)?Cw{їDE K'ĸ!{[iZ*3MmM/34HָU343o2_k֎ 1M5[}feU\?kaoLTk\ Ji#Yfyo2ynp=a>&(WՍd*z2dR]*V#mDƢ?q-\T]qe|!_p1v@ Vx 6pI<4/haryEHqK8"[?f8`BBGa ]7rbaqSF\%/@|#\ZUH 0"ep}(.?0 #]3$ehʨVpoWy &spI8ͯnřjm!u~8FXSHێ&uP}m aGQSHx͜!d&uyXp xތ gl,iހ^fѴ3DLj L,S ,mHbM.Kjs&%,Sdf[fKl0B]Qc c,ٯ6g|4.Һ\u.gvYL(h!N9H C פ\xq"C0V,br>Sh+(sHwE<ż곭uKӬxgU$fJYʨymUԣKf{]8vL ||l[7@LJ7ajN IQ5)`'wrlaQ{jBO/2?!3Q5Кj"MM]EB1tJ\6%ԝn3"OAMNٳ PBv4$/?$ѬX9*!BdĻ%.AWYبr|m!k@P#sp_?L"sm :w|kq̛ ?/Opc\Sxqq|*2HN7&ߌYYex\:Psvu=1IkYW+ej !Kl+V"B_|ՕZ9l5VB_t7j01jMijhGjYj-xǒ ~j-E'j$!wK{NxCGJ l&(,P_U0)M(-}johѵxuHRuЮj^Ϣ8Ƈ< iszTT>~EWmw:ZM`6EٗhQiQY.U>+\b=?<ϒbB8o)l4/ pV _!O|^Z]{!Պ?|$(Z{b`8$Sn?++:`zt9 / 9/9d{ۗLJ=yrN?=:~{HqM ..;(ڦ HtY!U飨={]P7舦Q^)i׊iJhE=kwQEr?(.mj%V{v;WθʺvPߛ581Zj ZsA\/,f9X ;Yր@Q0(`Ej _ X+ἧMw|@pӆK/طK5j.Հ:cT6[9 |Sܿ9_*oGkeGZ7p @Lup)o@$t< ڵ wk.C}P+Y\ 0UM @Vzr1pcCv:Xgc.ecxU:Xh3FH`v~cRv!it! dT#$tg181B2b.m#$ɂwm#$^j!92FHpQ;\o!]g!H[*eᄲ}c!]2FHn#sg1śů=cm#?sܞ1Bn~r\"f Gݸy)}_tߥ:rRm<楔A[y)o^JpݛR K7/t K)%K)ޕR*R:ġ|̛2pGK8Ĥlt~& Ch"z(d>{RedF! 8KhӵPpA2ͮ^`:5x+r -4N<ʮoB;BcUP+9iAA+( c5YD㌌l>X|9b~\:X.ջ?adh&r^qeNm^ 'LŸT]pdH-(q`3^߷37g5b1d уȇO%iL`eסS :a*Ӻ v5.Pcח" o@sLϯۇiZZ.IJRj)K1YX{#@ @Di# LXҨ?4q4,w)]Gs`^W Wrĸ46Y }% Yґ΁LX'k1; f&{?amN^иSnHp:>}E()KԹ5n[3}X2/0fލT{͋Exh"^=]2 SOU}  <"U"G<})BDsJG:LG-J_% 2t$y :}C>b4@8' kyP?cHQ$6H (qL:݂im_3h!}gG_/p9]r2EQwRn0U|W߁EDlh~WDX`(7ÃĈ(O_w}5?n|{w}ڊJl~sƘ5k(,m)RH j0jg"z3D?zA}">|q6^= AͿD5>:yFެh}:Ȥ|Όzf /V/L!eZ;1؏E! +. 
kG7ț'FAV;ѪpX<^|~ua iAHYq7iz;(~0<7(R > D`R4IF(3E`4 QOg_P/+TyB$DVLYt+K#u X2nWFBAjXߜ1ӓrUF_0%1VGg0_q% 90a =&־_rg0$0d'~O0^%X[Y%leF XoLmhV*Z2'1ۊLJP}1ARc,h[PP$O"NԪ,$@ݝ,0=ɢd.K{t}`vg7;m^O=q=ǚHcSF']l,$@ߝ,0=d&Ybd%I o@;YIAl%w KN|Yd &YdOhjN x|//%/h:wEywi6"镱^|Ij ve]9Lo}iRx}PA:bUs)EYͫЙ?GK Xd<%dzW{F@yhJcʏ?޽GG7zl8*@ETeA B@D9T c<""?cX3<O{Sxt+eݗ sc иRO%F9 /r<(~?;-`džu^;(TA>ii9*jzOzQL(~^Z?Yw@ (_~?ޞ̱L-ץbâq^3Y:VҲ)ijIʚzh)vHs"ĔlVBXA[^LiM^q;û1~ 0|gLpQkP)Px o_y2ӏde(*2U5b~ ̢89=靌gi<ӂO,8PGLk4?06/ʄ |z҃g6/a |Z+ TYg-_8Exb<8hTcLd/T0K' l_Ay $/%].<<8B";إ㋃+zPQOSCVBJф,j@=6Pk|)D^ ~ =\o0["x0^Ԍ3cɹ?nGn|=fԚFtE S-$c3CW^쾹TBKn \a~/\k}3Q57n0|I͑-mRI2tX}MpDl~ qeZ{76AG!rĬlXΘbEٶX{6Gf]`4Nl[cK~}¨-tnŶ ϧ9Y{9\nuJ8LƣJ4z1Z;'P }򮵷dc? .6r܍|͢-IZ~,zHcqIȀaɲ]VWUԣ.ͨ]{}^˜7tF;;zyҡeB=L' pN3lI+~ݯ}ɿ=~` 9~_Wu%^:Om^.V:~1w؁o*7u{*՚TvN@h!+ZݗC.R[" ("utgIց&K}w-H"'FZ⻻?DŪn~oսW߃MbcsXGtHxL ER KGZj?3>~*+圫j$5ī );`9t#B vs-,VSi@rĕdY~CKS)ԇmdrt4;"1dѲdE ]w7&Xe,V/@H qR("1xb,]Ĉ!Pr0G~4&vTiPQR+d"LA#Kpꉑtp ʍ%T*("a܄$!;VJG#˴}l;u۴ؓm_!s(AtH qII.>X811BGE8j-\qTCœ IbLSUR#F-nV]ԏ#@uD"T#7F ܨݠ휵4^@ 0Ec<9+ʙI[ d$KǁDu+Po ,x{HQ B;e%b"GbBe8IBqSH"AHx^文=w.~~ 1)BuQ% @Y`$]&M㫩wʙL U{p(@1Bb$ZXP͇&(vAUb`紕8j]Ƥ}bHNJFGHF&QVɢUKRijX/6M5g)-| DF*OCQXP &&XNP}`TSS$I:J&"(r*KU+cn(L[M'8h\e*b$]J4 ӳBePkX.yfD{f2#m9EOjS6BM0u v}!J $I(:Ƅ!vkɰl|b 75$GI1JLFLklM,lV,9$k(+rq#97[F'"\q煩a|gX1ΜQi17FA\)Щ962?%+jΓ}hek:'u:=*Y Qاr{wWZ`]-WÚ6:>YF>nzb$fj_0b!J!iᛘUcĐv4D*[h9x%#7Ta~#$=MP{bLelyEb}`$N{M;iF4YѹXó. S]|MR=#0rvX.mZo%س>yEOޚ "t\C(U$ALɔ$b$ON0i6Eb)^/'Z`}2fLFL-(SM&)VT*hB6xmQ^pN045l;O#4th,E;Hm 1q?WfOHjbQ0B.ߴ̤D]m8#r>˼~g`9(w1 gɤ9p (TU,'Xsvm@Y=QOs[`'T z3xpD/9kMc:n/E&XJ >j\M<\M03a!l~ׂBPff!Guaf "uܐ:yMm0TZ]@2pl_|Rg'/ no9ڨp%t "VۢTz*r{y\O7nPG! 8~k3l1 .!CzM ٱk0Zmk.:CYOPMfFNlh$Hr`b%WМQS@H"1㯧s><54:ӊN`*K ɨ!FFB 1ccb>1or}Czd:]Hmq!iiy"cP"38k] Y+pSeĈ!!8{oQc|S|],'%babT1mι5k^>sTrS^l#O?r M55u1a(T4EE]I\v2y\nm#o:Om`|v}ZLGrM`Jq8|"%BpDJm2d:pTvImC,(E~XupK:-3N'rp:HFHP]t % 1 Q:]Du'> 49ʐ,EI M[F|;bǏ8 7i^1z|/J.1b/n&}$,+3ε&1+o5͏'Gf Z}~ء<7% ?f=b' jZgtSyC(' [p'5=~> K."$]5_wM 14;v>רY<.u#Ϡ FnqٽZ`&cɠ:PKb,H[{nc0&ӂ$DHjxsލoݵ촾 zU^6J9AЅ/\fKknqs)|fQB"¿RyY$oDCHji'~68=uڈfXa71cTc-49IQΟp*{Q_I )".7MU< @ :kIJ'1iw:"6Ko,>P@sB\3M Z.X0iZȋma쳿4Mࢇ@r ccNg}b:Ag^jM0Hdt`)sg^/NIFNڀכ4]8&Xj::BoXSG8u;-t97Ar q.4ί wJw+ZǧHLPd-9kx@ީ!f(O##w|c%=*3l;"k"ΏAOhw5R^i^84[mM@9Z^yRcƌFf@D]/ oP{7Žf70 1Si! 7CN0bmʅ`QUv/Sr^h16! aw|{Lw|A,M,'X$6C9o;BQr󭌜vٓ"4r]C+g q˧-Rx6(/ɥzhuKfGk0ҊNѮ" \fowkyQ#14Oq#)0%{͞ h&d:"Id">,JDAHdnF6r|6-pt\H" L2 RnHrim,]W1 jkESlHr~iADlW$.=fknqwL *C{^]D6Uب'+AvŰTfb=礨$8It)>4i7C^g1Nh/J Q2g$FYlu܎%hru} BITXE:yw6щғ[MZ Q$;UKTnr@A%ſ-5yme8B.Ɇ K5<{smL{-ׂ/݂c 7cXzVw`~Mj;7ifs0S ibtT]^2i ! RM3}l&^l{mlyKz-Xnz BYӚ4O#޵ ؃ˬ0⮡n,ףkr  ȧ[v 2`k9]R(X M@#oDN<[Xn[ߤvуXԧ˵`I_|\dr+v{KnvwRf;ߍ͍)zIx2 kqŲw?[[vY޶4Z-}7L|kr!u[kMn5ƓQכpA rey1wb/F{|]WDq6wӡz'b xYzXiH8s+e2uA1`|Ǜ.x=(gziBNLC-K:TP9_!w]!7єS}{^qɌ$$1s f_ݘQGS6F緷Hn Wi è^*  {z3iRMQ;藅SAGϭ3"uo(@R?UOuy8n;9xH#V.h%%嘌cD-%~_vhI)eօ>F5) K7ěEuzsmxp/( f|x-]_~>Kp>ҚMJ%NzKۜX-C?(kw/AD`9hkN3O7'gFЍzt|t<0 C42ǂ1ɦé7hXR;am*'a4 ]5 SF :8xP]w:PCPqtľ%q`tnπ~|wz }9'ik=F'{0\q|WG` C Zn pԾVsOnPO߫A'G0/=S v"k<g-hR柧O:y<,ihF)l8zŴ\SiXѪŔO[n Бr^eQFuga\Za0c^Ltx :!SpI& 3oOn:1_&W.]єnf?Qq*b ҋwAl" ,:" Kʟ'I#'?_nFg ?ՠMT/Y?N`Βuh'.UVҊڞ#  0 su`iI^?=qK$C1eD@6ES#F^B 'CEҒçΠNcABe&5цU[pjBȳM+YYt;S@ hAc͟/f#@f6dlOv`8x}i*c!kNƄ  qV 0i$PP~>UhqH^Ȅ1Tp5nXksrkfdj,%Y"2#V #VbI^.k,FXLs?+1J/$Jgۃ#iriG - }FF sOnpu5K!v# bT &ƄgFO*+\uY.?CX"A ( 8z̤^YqX1wHt*0uZh&+pR/o\n(|Ll&iDD-%WL$ GU=Qq~ݛ\RR%qO^ٻ:1A}(f-JR̾kICGڕzit/AD`ӑ磙-N_o!`w:,nem!e;o{U1>}u+p9AC05zL0Q^G+bz^q_ùN=2j (t[JI!%C:gheL=~~ ;_9:+0U>ujs>[9H1 |ͭ?}V'“S36_}g 9z@.v3 6n[eY^ˋYu51K|4# /K.R'EAkVP)"Zz/gE MdpXY`A. 
TZ|4Q8MjkDp{()ޜ#LoI!͒v ZEP*M:v(w1@OjBuinO7=-CJ EeToit5hKˉK|r.i4ڜB]/ oĿiOlgFR7k=q[X@-f}aӆt-71zi)@q@tB)DjKI1p0RL1C(vATPfB`G#!uZ0ĺq@7L8cMt VJp\22wp2>Paw_tqO)K}DK2> Q?TB$}dQsZ;-VնeTZDr2݀h)iՊnr,I&X;jHyy׌M`1(oܙYnkX n6`e2`)P0e6̱0YF'8e>PZ6{ynh@RZ}ށmT!)(r1)BzLm"wG[QdV5f4'#_# ЬU6B@D< AE)IJJ3ԓ@Fqt/Ku26BkllDB,i>V'Y.Vf0/ aS J2`p""v)/DBngZ׆M%2ϊ2# |( J_qZ'E-wJY.FZ&zin=2jS!K2hhW(%i 'NՒp,AM-x!kTB`r s^8#MK` ;cHYf?ؖJ-V`{ꨕ䵒jTT]~d4B mU&uXrkqJ-A+{ܼa8ZT}mJ3w9xsr#gm ܻŴE:SA!Tm9<PCvlDz(@flA& "%) o, BO>KBaL UL:fQ!b|h_ , sU”1D)_fk)@?Y `zC=pY'ԁ684(R@q~CbfvjMk( Be$\NA*JbwiB$韕sLVvi ;li ޳K g6S (s~ `c:ϚX 5 E^#9 ^L&^Ap-OO2pO2)szbSb_7wRg9NZ+NI: Tϼ[eAǛE.D'*=Q10( ,E|ki!&Q8@{F$.\PXa$6T5T`X@E1N*"JS"b|_bqdr gv!/:,RWS؇EOgU3;'}NWppG-ieKZjA+:-n>%n bOhE-fm͛SӱR@XS[a 2>Xj5eڂY.#߆l8gPa~J<"1!U4pP>?\zE0&*02Tb?6҄FJFVCK1?*F5v- i{bmיd촸A~ [=pr2 ׯ$WJteZg=A 턐*M.?Vm+Zશ րUZDePQφw͍0_ #>:`2))6|dpSGl*SYNUu*,/\\:` Etn݅|wjкCٳJ[3%?*̧{2i]&c Q\pr|b"} @RS|B0.)9!OF-g&+lqѮ炷$Y5 )_VzSAe8En`R7pݵ`ƽj`aO_=&gwٝj+{d8 4dV(4_'?Zt(lJؕkE]b<,֮ =R4N}}_y[vYmCMfnKPmL| w BT"{s;}y.4X;Izsq㬍C㋢ّ/\C]/^82Y/JWzAG/0@9~uaW26Xy.ʾ>F|_ϳߍM)tHۯm8v]XûYoe@?¼=w .Qj]\XVŠ߿Md:|n,w p%>r2w>o~G1)?%KõI6۬ ;70'@]x X{7zwE8ZRv80 *8 Ӭi`xn{1 ˙3۽˳i^ VckSF ~t۳h VqB=a . ?mVPnJu |Z/U`vNtMpYaWh -<~, .D:`2x`qp5?}0{br{uQz|PPf@A3NQgݯO0=P Q;zgYnp|vOJXr6Z?ZZ8ycpi +Ap؜v 5,^$|*ܓ9 ΚKTra-X8&*N;|6ç~2 H (4#8siXSm XHGS Ɂ&d&Ҷ5- ˎ Kwfւ-+X0y۹dKĨg )^{cjK ٖxL^`Zz ڮxޖ;A0-etXn;%@+6Xe>^{;x]Q Ld=b;(dύ J*="rX7WW+zrW ;Z&3fUm=7̪JZqwXդRzT= $-< 90sY{̮ٶ!ڝ䧓BvWɰ-*(/R1K~s;Y-whvb>Wmy )O ;}xti<u~d鲳Btg*!_I2ڋtBQwG?r٪7w9A0<focY%yWTNnf<2FM.Ϗ~FL ng xRSyiei2]3|z(/w}wA*O]KSv,p]|wN<|d ̲vX 9,O'G{[R$3/2eܐyq5Qz&A^uWvT H"U1o&Wg8n&)(670"Q/wǏ.a[nI0~ӤGeM"lu&88H~wً܏@{|c_r~BQܛ3s̋3ɥ~UId:ֿy?JCg+@G&cmG7'_J,CB\̺;3IeL!1c: ͙nb-~xMwF9v/gZxn"TKy1g򓁽0h&CQBVOV݆*}Ip9hNFe}nRFq,r5\k,x&Rkz%$39^}d$F^uv1VfBtBLÐXj4bWYY҇+8*hY%l# ͖o"M5ptn։>gDl4 5=A&19^ݓƹ_#d=Ey>/xc#F(V &\(4~V GĖ)%E$Qb h Y0B0"(mi~TqC&Qε Rp6Gy&q\t~fdG;[[J<G&Ct#dŔWls/6^Ekf]gR)\!f Cz_>C4PҊtҹ<2}1PUEJ16+#ZU.ޖen'A3^cהA[W-Q'^JՑskF,`$E[v\W/WEpiA7LOido-7@9 f Z+m HuQ eSх+ɠeL$Qvaxڦy?[S!\xp89_!V>~l\:9N*qM9mvN2Vuh;F lC, W33,I iXČ[Lj#}U7(Mxß_9zJ4CbY0yӔ۴?0w.&CQVs)'@r'>xC\{?#S&W@1q(%X! TE iտu]=RMf } "p}h,|#}.|16Q9sDx`%w].syP.㽷eMzlhJ @MHP Q-"*eqlE!5G!+E?OBtn%X{uJ ΗXJ-"ӾZ*+-Wsޗ\=8d++-tgw>$/ez2&(yo߿L2Y(uuQIwU\tʙwLn\r?m=L^31rd%_.K`I ΏP„4bn[^3O$V$̸,y6W,8cG4IpD㐣21 dhD Wދ F Yh6`֚ hcN5|{7ON06PI" ~M%!REg 5Sb!ӱ+LEԉ8Z$C Ea,.k>p َbК c ' 9&HI W[ "R &A4Ύhyh/Z(RT/GG1{03gU&,L|UAf\1l1f~VQgz;_YyL}}Z y~zj͌FE俧"1!ϐ]]U_Uey@n '3pXzIpa^L!h&-e-Z&?&+A^7M,&q7q5ocm#!߬rퟎHr 2;Ӓ΋Wqv}vӤҔTQ6C҄N[DحLov˅~PxW5 Lcv RB\x7.k9@:%h)@Lc"Mn@%P͙44X/9 Ou<wmG*"R("-o&ۋiL]j]sp@!gjA9zSs刻 #Z#H(]igɐJK!.ߎUM og~/hG_!t, q h,وHiL+&_ Z %G1TqQޡ|9ˌHNPa9`8IGfyVmt2k~Q`ZRLa7{;g+rBIn{]nLen׬1"؂$!NMBHk~1/ͽe8~in OIpFRFj$c9޵XȇnO~ )y# SEYݕsf?% &<jF-cc:TRM)!f56(萆(PIY\5qňDHP-0ZEj%ht"T d섖rM8)1Bbi"B/БNǃ4v|Co7"ʟA2yf#V-}eʡMACx5#RǏp6nKۄwhBN TbiB3"ސbV2JW0Bk+H4yk[v #^zˊ[x !APZ}AގjrG?|I4R^q(3Z,NIzڮu@&plm &ODtvWD6@CdEj֫|CI5-|¡)`Һ*bEn{d5yd8Ozk3J#5]=_Efe*:-1u!Bш5I_0/p1<s|j>Y7Xqxl)pa?9G!uذ}ς< Ȋ+b2A1ܗ HjG )݈ \&ZQV QZA--wLaPX'kwWNAmME-Ծ /~C?dKYǠ] Ax5>u[D>_<>VEƅ.zwa1%MƀA#JZD_kdċEX; -/Ñ4c4b%.ٞg5k|z#u%78?p S=+nW )G= K<Gi!iͲf|yfraB({u:xLլQ0&AͫPB!ʌmNF߫ux l!o@$;IVS؊gq`H+H-ҝ\/&+g޾>x{=pf#k#lj}=kղ7w3ٍZ[% Y3ÝRA5!N^_~tC{|p:hςErL@;%E)mכ쉻GL~}y^_ޢ߇ tvy,چK7cp/gRBeErnFCw)&m/2W p)e29!_ 1y㖉>F} Kvvǽ;6Z7XVTPAF=90EE˶j{>a!nfT rٚ8rnY6YmKإ˙0qC}TYV0G<PcwCJZ L&չ%a}0ca,\HU n 0VR4€kV6L/\_Ʋgw!_,b 3?mht3E+4G. 7u ;Qb}- Tdwהr͡^!9-Hok 0h0tXtBˮl/^ۛiUǢjiӈ!`jb8Z;ԉdW*ȷ`UR3IS P`WJ]&y}̲酗XH֫sQFVLM.ذ ׷ rǵ݆MvxagÇ,5x_nˇm-^bǨXs2PJ"x#S.{Y21&Y!b{^V?!6k)XeRMGyhdڻZh_W7 R̰9^G2VGo)g1le,2K~5@o [CtY};>R-΋82 afiO? 
~9Ĉ(s&@Bj҉m{DyL29r!׾g#wqC-+CDsL99o)sR&PN8 UCXm=2T~HG6R{!2cAّVƚ"4\iKi?[}8d7qhd#>4D+а0V@e$>Ue9}.h?7NhO^|.ZFr d. ^40*y@-bN1Ȝ$}DMA{iSXIO0.sIHXLfNa}n.d%hX P0G2L2[OGQB; C }D{():!HB$S_jtz.1 qa)h73Ym $.2>,+d-F.=6;4txJ ,rvGq{\VUz~1RԔjJ RRdEDAL:4&=@.x[Ԛpo<mÿ׈$#B;s4nn lsVMo`#DxDTVYW.vfl%/0phnZ7uҵlZcrDt\#\,,9}6)gѺlXF܁t X}R1ZsC5ߙlbQ .(jrb$l97!JנZ6M)NyE: 2/霺}cO7~)0a7[裏\(͐+Gd*οNd!VSuKMWG,x[ 9o| 1tӣW:RFњ[OE:+;iboϮf3- ;@0kЭ-:`<8Cr?h4 ZI77(с%t.i9ÑczTVj3VM+GIČ3MW[&X}y'!'+qHQxL>9$t]Ą. 2}E| #|HCj#EN`KUCِDOeM uv솵 ]m6cO~ w mh{oěz=[Jz,^=xM^8=|Ug믿DOHyBk [/P޻fkyɏ/zyX/n˻n橛?e@_.L{ݷZv=]o>~h]?) :_z/>F @hP{I?qÛwUa_9˧p'F??>axaf~΢4$vϛS$=|?\MzNkPeG7{WƑ c꼏Y= <[bVKod*UdU>Զ$+2#38f y]I\Kp4{,Bbd!D`X) )h6:O2?m:/ˇ(|he 7C+L 8kB %\GFvV amlcxGWGnt7C6"®fٟzu/gz2OǓ`Η#[[Jt+K|l!/"U;?m3l>h?_rOzoG?Z֤~{w3=u ~~¥0;0=X*9;Cۼ_ PYΧ`74jdz9 t;Of/6+?e0 4}4`e`cGMW]hLk5W<*o;k@`ԡB48LXZaC@ :{bM * N MxRd7>Ə^hVb%8F=PW庂5[6&s[$i8-' , CaAі\9|a\vO24O " J2ՊQЃ zGc( ̻2ZݍF#UL%pWNo1ntav+{ɹ1Ŷ zӋBpNXU-nw ݸea0r/i{QMіѐp`74W G\h, XAӈ{s|f _)p31D2aSRc2H`FRa6VT PW!p)~ S.%@*ゅ (Ė`<9*H-dhw7cnF5,vi8?%YjFo8"pLaCsZ4 E$V #EC(r,op ;𕢇'z)J(< J;l"E-VV:?lʽR^SAgz w0xT83b@/=$g >aJ}^| "O>=| mm r :qUf*`ܳ)mXA1@dK'W&qUN,.jѱ5XtRr+> |!> mZUObw/*ʡYˮ ,xeO<+:N@!MSFL)UP$9-j]=ib=~ps_aL9b6C+>1zSٴ5F([B"J@jpX?9*6S_! $¬bmu-ڱK56:r%+5Є)1%X+"n>ƼZؚ6rn/G90"6 *9 $]LUgx\Uӻ4VtS!`#7728˟ sO~Y9:~x?iQbh0uk `n]XK"~_YY; \x.,{,SM,Ռ9a(tеgů?E_s@!~)I(.}sG nAp#nX &L!,M\`[pMQ-]T!rU_t"#r@ G7ȅI9XʠGlu{J·}:tPQV׻_Mfؗco.Y)Ճ&!Z0$B ؑP﫣>aо}' @("uh6X5X/7UhCM_ ڎS/* ID2vו]AUoF_Ht?{miqtޡg~`x6g}wޡs]B',nHr 8ܗ\* x,>묬.UZ]UԷ EĦH]$xQP2Z/\Tr?`eIcŪmgZp9b=|d3w(v#]RyW&եNNꄓgRCmҖYS*Q]_z _Nq r|e;؜T[1LѾB9MNJpH.(]aV. kL-|-W4Nes'Uӳ6L[4T4AYi-nÕ`X ((嵺`J1P3K*:=$*mj[]e>I[p?Enw]GU^zs_3J;J٦_%S\D\҅S8U aUnvyw֡ÎT/Oe!xІDK5]|9+;OdIc(Q;gvP^{'>Wos爋S_.e]Zũ۟[?Yy>0n&(4nj"`Qiٝ[r~8У) "!B~2œlf/EuVO5pqB{gEa΃BƤ1FjT݉-e! O] Ů{4x (9Z<֑~DQ])aOehVס╞!tzqClĚ p 82NcJS@{WܰlYCvZ]61xAV4к:mZy]%ԥ*6u,aEJ`1K1M/?cgH 0S BS%R R$:Pb!OcDC1|1&ɿs4H_)@Xő`;]RMuxg]FZBo^*TΡsfQ!ZaSsp$Љ SNmC'[.z+r+ɭQԊ&4:=8MOvyƸWUrDyCh*dP+Ay7>ݩ4"N}~4\3U]U-ftwTssyz^0MCLoN0`k1ɭpU1.q97߄B)=8w(UzO=:0rFa07qq X=g!W@JW ߠ+z/5UEv(rT]dQeղlx"u5S6<&0eR{vKav9AaXo་ z1XWbWp!ŮH݉ݣ.w+|4iu-36IB丢t=Xj^g~>S=ζu9N1bċ#BV,Ւ*4V^ƪ#+d7Mu"_t£ĕ 0ݘoZ@CgK Ăj⩕gpi 4*li˚T>7ݬ>[q_do}B+ҥ9ϑPw}ַZˎ[+.˗Ii :0-'EP+0i2$XPVȎݾVv1UQA(~p)B3awV;bB_o=.Foo&saK),auϋW\9wdST\{ [̵77re˲E{ky3Y9.TG}]jI{׎ nIr#JQߢp ܡkoYnVhJvhfiۖjtl<Mˀjp{Kby'0p ]䑜f])FY3iE5a: YcP øC@L`i$#B%ZS &3i}eU>O=y\bt)^:Ùx[[GQU"N *<>_@ a`Ĉi!7Zd X,r,c3r*v0rUWAn_q%Mr.G@Iqe"S֑-NOf<0N}m?YsOТSolf4d%2ifXA9&N@hbb2Q<&ۘu J:e{Hik@adL:U?Jz!"IDE&Cͬ" eM23'L@`MuV|<ûlNCxbIQ't&xOX8_O8|ywd-7*q+-=rXW +Y"xZTH&L|8@H*3)0(v+ʤpFߟ杗 ]+oOYl,&"{#cSMo=萃Ō&iAiNR\lnlK*!)F^_K\<.|xN^v@F BGS kM=wsYUrYϿ,8H6ͬP6G^SℵM>|e;9lZ2.!O/Njdu );t;ihmچ~a/Sˆg1k^e!|*S]DCs8Qо6>! ›*f:k{1QD5G(YaȾLl͙ 8'aX$==+-h *YM ϥa(-w rPɇKM:.'VuP7D|zܧ{bDVc]}Vt)Nr$)IF 62b)|d%rhx4Z!6%gsf^vZ΍Þ/. J5.hװ^'4xurHdkMR5hFTct=Q"\47DJŝX Չ}Q/MY6>)<6tФ!b1in3Wm])C e8S.[xeznmޕcjI9v'zI`LEl^n?Rr|Ulð9Ԍ Gr'j6Q j^Ixyx^fDhIbqwl_YUT!b.0Om]a=B,6]ŵlzH_ u_`N"wi/REqohlT@c@ji {E1V^~>eϊ1Ü71] Ț1v9n& ޮͼe;ˤ $ŲP%=kWtE U$uR!Dpq& c\{3;!Ƿ%Գ88"%Bp.@LXaKO~ճ5o9rlB B`+j߭R>maOB4գ8>J\E5e29 mp2Dv5>҄y{ V*acm`TSS$I:U;r5T9Ml]3J(Pp?phZ ~pWՅǹiB4Џ>:TzZxm%8j1lȝݧ9%5t⼡O!=@!vu~D_ 7"jW=uc4=sWnp`3n~:GX\q3Z9xMW$-Ve[v-x1~}}.G3$W6)e]{| aozqOHىwBaB 빕Bҕ[#X>:JK˛i=|,.\#-sarn߷F +swQk~Ear5 4Sv9󅔑.񥍔eS3fg>)zPotģa4F w>Pw_ Y.OGk9gھ}>ͩR).\ #kZa0_ Ӷe';@ ?A? 
ԏ$u x(}W`JcUcoPQ `9k|MucC!>rٓ貔b ,$e1>M ng_i-V'i~ݶ> |E;RX4bW61 ј1-X30MYRis\zmeP#vNaBP}crMz#빛DbL cpA D*E$Vps>r]l}ZRkװ?o%4[N"H}ZPA*Pz &"q3N[@Ӓ#l}Y!/`4OHqD[*ddUiǨOuJyST!\&_:rfc?d&ń$?kfIF b뿓>ˁf~j=Xm:ɜ ւ{]%4^E$3ElQ&50ԏ4޵%3a'"whr&8omM \&BsFcLYGVDФl*suv_*' cE~`O@O3l=趡}}nPV^`]n " R(#A@b,(74=: -cݴIoWf!tvycObNNwj-Ղ)0)uV>[]xճjh6$cldPr jBx^<7^.m:m*my):6<xЉIk}G!8U;llf'Ȍէ^ƅ>SқLzp~_p^j|ԓ)^4q1uNEVJdzG}o^mz??-&naSέJ^׿<)TQȃ[>C.:L-ë9,o8ȻȤ73$.j;.jeQc5EgD d_HgزSŒ2Q@8]?pZ}Q2Qd'1b,d8GfU/4Ur,s] 6vUM&^UzVu aje`g2Z9>÷Tu^Uq̩ٽU qK5 c ~'O_uoXPip{V a8rђbQFyi\އ'7"ޡsé>%X\Tp9˷f_PN2sW)FV&⭫A6mm?YTC7?TK`K'eKԲ P`*;ͫ*;0&~Q\Y~,Gcf.V_uo~<_I}%vP XSwC 7-*I.оf#2Ym",=,'r-7P!-d@_Ch=V5l@H1Xbu,VǺM޻i[Y6஁t/9h&C~=whSH\ QPعNzHJc]+!Tv\TȲø۰P1+Ֆ2x յSi_J ΑX.4s˃[U#ڐu-tUw'*7^Xz"wƃv҄3 ˙T rr>ka} )j)D}y%aGޢ$-:|nqktҨq8i5űL'=<=DD#.໖y<6 QH^ⱓu(mnqڥj, B~;!D|%+&rR»l}Q)i#ӟQ8moO`N P̨ :2#-+*l7ɭC> C stZw6ZswF`tp tHK5k!KMɕ[ ?면<fwνXw@?$t#.$3kOR&LQ8u^g6匕ZD4߳r,M:IiBGj74o<\ 6U-~WhPVSE$!+Sl.$H/iY|},L,^k!0InJڎIxu[Q+P)xl(S*xBMʖ Ā[ѥWrhf >||r>e{a@oRKTznݟwm.ZF@-4I ǙP%=@6+_dde볆ynfh]hLZJRqVmwN*/X: C7ݷ`FϯC *y[aǾ0L`TFn~>*4bTS RMLR6hY"'gCi ˭Z8@>|pqU/tEBv(aFfTƅ:&4' m`iM Z]$CwX4 aF*`6G4)|w'&뙕gwI[2چjtce m&H(ڕp QGUlwZ^e 22OM҉K:MNx}sJv5iZW©V1UܬJw1Z>HTjsF=eNݶrl:ͥ.!Rgg'GDxo}ST=5\8q^puFHh2hvJ3@ؖڨADp:t#҆9#t1Dͤ%rg))I&A8BC%\IE㏍\Сm5=""L)\P3$nߜI*vubJL踾`#MkY, ۶1D^rtR4V:1rIJƥqZΝ'iQsh?Noc *sSZԠuMIL7 s!nӏ@,WJ'WXr[R,>uY Se)S+_ɷe(ղS ň:sQrLmB/}\HݝQ[tOw!Yj\T=D(t'jJhdedec9 VQ*ҝ Q 8 C]`bMLO66YmF5~5 7\ٟ?F…9Z4B70)o8xHgA!6x 1tA*=?]w[ a&GnCb%,YUa&uYBv/-ei"OtHcHtY$}89uEQm9Gf9y U\V*Qr9'`bD-@u{Y@2jRن]vЉa $n|r,WS=5-ǷKƢlJ4f4O-rc.|}?񙏨a4l_np!+tT;rQl*ט v{!Xa$\ɩa̹|=| Qm1]~bT-H hO? fp yftMR+K({;F#[ݚkU]S{E-,ii 1`k/ %KUv5 4عAXb&撛Tzd(jZefc (,7v5b_4-O1Y7LG;ΫSܸvvm>RzXd]Є9v?CiJ3Qde 0q*ɚJ0u/]2b\.4`٨hiD}}ʃI86e@pNeK=+P);vໟ[1r`)jyR9Ng.+RR@j#v-(僝ncSJZ;(bxA#8KQ8EI]@T bj+kJKF"otS=" HƗ\w0I_O7eq(L#ℓ ub;v23Si3N] u9O2YfC;9ds8dČYDMR39=협Nb\io Č]w͊m5Сy$xЇҷ$;0FD_;FLN/s;c'$herntɪR{{d]dv>a쾛O#.Ո #atwWsnn;;^[n7ŕ3mdN{/#d[9 :Gavņ}nekz=ifSBbκXQ0 7s%lk Rycz{ڨ ٜ47׫8*դ\BX+1$Sqv5aaֶ≡qANiĕz9bBRjv˜ЁePUEyr2<~ ګT:7Tl~'σ:3i4 ˄:geNtkR{ڛAoVp䄯h*N\#sP",8O=ѦImvQ+iuϳ .8<˵3*ȊI QĬ 3Kp)69_4y&L'ɣK.[y̔KLFY3keFy[FnCu@5NL?9~8b/Q95/f'͙_'bRDDpvs4 +53kjʞ%#Co:L2nt89\:΢ -5af/OMx~\&8%jd|_=*T*YYɚxRXnkU4ip&oU߷ң"H:Fwr17#'z-Ę\WdƯasEm{cՆ AB&|orwzPBQ='iXZH| liJ_.wѲT ؤ1Ju(Ղŕ,0Dg| b)1j27/@Peʷķ+rKmjM0umb݈tFRX 4d 5Mpj%#3RTDLJmՌJ|[!4ӄb69㭬fC 6KA睊P!#yB͹[=oqц |?oQUl U@rHoM߾I8.@JK34A GsddaYDYBI[>|)?^4-T0QljFA?^Xkr4"`DT!y\jr0{,]}WV_`(l9Hh?+5 ^FXI/\md@8@i^U^PV=TbZ~^g"'ON0.z;+(<[۽ThToIrն(4a?u.|G]=1clXoJuIuxܻDNKh":+_ p1N$Fj"BB*N[UdFgN {X֑_oQh-0'm:MELQ𯘐:{9=2G$BSJtQfi\DDҚ.7=1$BˊDN["FZZKL|Iɯ,=m ݵ t1ԣg̅}E&lTE`a.xQeUJ3x\>Tdv_J# |E Z`?be[XZe &[BwUSWzOh%]3bQ 8q=GA< F/eztІ+Q^,2f1N(А YH<Ɩ-t=J^]d: qsJ'65ՂrSl&jPFEon3їOkq(+Iwf SGʇ&*Ed_*A~owHDiA(,oc84uvó7PYM5g,>H޿kJZQWZyP=S x'%Iet]UFA-ݼKS7RڭPC$47Bsז d|f%%-ZxeP'eړ̄u?65&݉[ge.G{d;=w)rq\їr_}`Ge!/j|U&XKfM >Z"7`= w|Y S%wEQTi[MwmV L_QG]Z^5Mʇ# ;8BtUxi'?f݅Yצ|Ɋ{xy`VmAp*-$J6л/̏sM8BtuE<\dJ[MDMKn㓢'fL&KQMa3LI &av}P0S[ C` b*(v558t|31ܙ5W&uŽj5ZuP0 & Ը@L$Hh. iKOw53]S֑Τ?O=f1ձ]q=oTˇ9~=}<4Aum q*cx3B38`G85l<ԍ1w5{`WG[8;X`K$,uhx%ˡ%S[^-/ͩj=_}oZ-OD٣=nvLСѽ9Sd fhMz7`'gEW|8|pbwCa#u9Sڞs)_BДMܸ!ߣ4L}W숀uT*̝}\VŒ̤Ri)1Jpn1W*y2j-krŦ4"P%͑f@9KZo?{zh]nX{/hN5 jy}[:pȘ']Z[jEyۗjvMĖźR\9V*}INedeH/!!1mɮskCߟ.&8]7OFM-`D $WJ瘱*@:5ZÝJV{^y"|(̑*vF+Ja$Jހ/e~u/Wdl>r6ư\_g 4%ʖlWŜjO@P.,( ʼ*JEw^FVn*#?f`Ua7TvĿL6R7-k`VƇ5l4%Y~olWTkiAЯr_(ط|v~Ѭ쯿OC1,L2Ǭy窼L>Xъ_kLaFNe3͇Ofsv>DZ^2 , }ބ/VIx7LI5Ϝ5.) 
^"&ʒe+Z?W$Ǔd!2T; G#%G f/‡VX!8 c,*B|!rʩ!LC6:)"-ݹ64p\p>KfO!M֝|LV_H&v.q)XOqi^4 ;]̒x-f9g35?;m:|զ*5?3&jbN f^O:vnIwYWq'LIq1] "EI3SL ^",MGL\`?p.r+MU|χyY4z, jS<cfHom<@K˖JWse!v~׿3&۟|6=X "c:zYο6T+B15"֚F fkC'ХSN: dB>dC+CcQ/,f~ 6[&bb - UZUD-4jP] _:hcgM"{HR>wąį7ue,}2+[YEs,g~f;9qӂi/`hL vt'\N&GBzL@Mpm.5pIYi BI&IJU-ޕ6"[~{_p_n?4O"ʤ% tcOcnH)fPZ/aKheF-4 qnj )q Id{JK5tY%4-4*(;Lm>1 @J-be1SZk2Ns[j¬=niR>ԗ'l)3ӂsjB]+ !d(Ύմ.؍B gvN)v Nn626qMLo$?3;g0nJ6ѡ;]R+|(<ݒ%*|\ g8Or ״j`vS佃1ͪ"Q-@>Ak !ıʩ@W 3 K8$&6KSlkO)# U0Ɇ/XG.l!>tIMA#XW ) VF<j5lj~B|}l%9ʽ3=n57Mv6}jN>. k[@eQEжd=T-& =VC7DyMuf/r2@<* ;>D֬й +Ғ32{Rם\ƎJ!m -voRC(B0%lho}ȉ1d )w'/j8;"[%Z-A:cK~/LZaW$,f mz2Z0>0Z0W3>@{7 RK:pz]J KUʧt2T>ul-^*gYTm.~\5!,_YrʩeQ35ߝwgR{{.1јCM%ٝ̈&T)S#fəPA0ՙDIUn¿dG1 zZ̺-.))x20?{u~\˩ZEOww ]*Zb,^#ؼ5v3Q5kd: * 8q!Z2"X /ќ RzǵF@4ZO5 KQ"f͓}Sy>'rL& j&Fx2,E~ q`!6.)> (q'b#&*Ʃgle S3<8\Fyjp|%Lk%i)1ĺi5'xyFZ֖3{̪F>0R |reY\ ># ҫuY+fr;`FY r}P7Qh)UV[Aj_?WydKu@>HQmN8twAO*ΧI<ޫeF_&ce]eU/mΡo{ow?C8mf-\\go?G9c\YfVg;bO _rֱٜLxaK4WhZ:W~Դϊ:Ҍǒ|cISK _)Eҗe/ Q cc%6&҂!|8J=߂8'H9͒߭~HիZ2b7i?<=ɀL{{i͹}{'"[oBy \ X\ׁ <ۻn/br^Gqrc?@c(rxP<;==QنVk<#d7:OEtG)eNJQ\~\%\L?{WȑJA/;q>x ,^a*)ԶE,E)ɪe<زGFdFD5fɹu<ӟbS(^ dF lň{̑"ٻZ(|M(._]Ab(3`i2=P %. !bU}(43E0.LQ5E<DZ|ãdti#9$Cuۈfɹ6r<[2SkМFj=&ZG]n4C\uT]D2Vvr]~)i1Y/uWwJi1. ZL\9/*@'?[ЊbG=R}Y@̧nkb?~ oQ~2X\9ȍƆ0 Wi\ <' qkdqHوݱ+-㡴XZ,__rCiXsch^vƼZpBaOzlLGu좘$nE0-Ҧ\,9[Ǐu< >BWz)z- %8Q9&+򗦒Cb(SтnSє,9TuƔʰϤk#B=dNEEPg HPuD;>U5R وZ*e#4Q4KNui#]8>k8r-qQKpAX)8Ce5 >=$#7`(@W%g?s4KKxH}tO?Pd%I#gWIHlEE0 J$hP5UMU*QtFB+x:FIdL(^bgtbsj(x,9hPw/2*|vj=SX;`@3|> Aj‚[ pZQA&^Ʀky.pN4^/3zz"׋>s"z6$^8S, >~8`8j/C*Iu)εN{kjE{xl?`uvIqzs2X=' 8)'xԆDkg(bFjj{ԲctPҸ?gAYz̈ 4Ƚ[XCBUw0uyͧtVbI 6ԚZ#Bk(FdwXed$I0('>\JHKNRu+Yu6S*Vm;<&jd,$[7%u\vyv4kUtx&ouo,uslM7+4G/4BqbHS&D $N۠煳kZmK`9lo;-A1k@&uq{icud؆j OxDz@I] G\`fMW.,x9|jn XM>N6FYϗxO|'Ԇa^ =llVߕ=цˡz_v,\ji?>P~9{ R'$PZ`QΧ0@0U&0 mddGYǸ B7 g8ӎq{U~=y#˓7NDW*ƅ` :.}*J !D* _Ԭ3zs]R߾OWr6(,bft~D~ u7q/=q6jw __r̥c]z0?!?bHdM]S!6EyU” eb 9Q꧴yvzx-D1Ӫ/s Yb1O7T{xYͯch~]BokRb\}I5RhlkG$f\AHEaY Ņ @TKQ3/v$_;2}$Oщ_oM~@7y?[ȤBNBw͠P腵(n) 97Վ#ch# {V=d$  N0e16)-XYrvѠ.c`#t,b'ЂWm,_L/\k.5kFMQC Y땪0;jT[5#aRB* kdvїQh/-$^?fg2c2?I/Svx7I -!y1o;&/R'#MHQ=JQ=JQ=JQ=jG9@*f8H?U߄vNNNNv:mמakO9#J9r$v-x,/x9Wf%;{ܴ($t JCgBȡzHoZjK^ %ntGׅx[>3zZE*ʸ M])"|dJ2!1H`a7 tfŹvqn+*a7omV4 饟Nƍcbi_Oq@K+5,n䝝_ 4PثTghK9MxUsd3dۧ`nZ|RnV}@Z0oUK$y/nPQ2ft?͇.%FUa_,,FJQJF|c! lA&5.FEqr6jT"~W/o`I ӁQEwh6Sh!tB;OZ3?Z%bMAh؃Cj/kEhAr[tct @ G wL픤!չq`h=3I-}˹"M-gl$_HfO^du, KgC-rSUHk@tKڄwڈ(}K V[#2z E\n4.WHF![3Pj ~V;j|3B{)+{{`r3Rtx/[H?45;"ȗDܴ\NbrZI| > D#|Jh,"Lg4K`?t|׀K*ϧwPQ0y;?l?C(zdfMTভaߏGfvaxgPPԔ^}28Zʲi״MT\E73/I72Hs\ʟs΁ ڄ@僟/^|C%ؗɄ$w΀aeY 90_^>B+e#u P-+r1^ϭyY㕳^%C)|,]Hv){HA(ݥl?A[êTmd4.ImFpʃk/ܩa#x3:PLu><#KaFtN'-zu]0 κ~(;tHw/'Y:*?>[BBW,n > HC+QjKj#UP82w`O/Ȣң vS١Gw4\ ?QłVБz~,bIm%.+ϮՄ>;Z66 xɀχp!⧧??/2hVUqހӞ^~v(-t7=YlO:$+ѮtvE`Uf4f, s\z[ f@Άs$={ >>hɾ3_ x13g7.աBY89zRƾaۄU/~ v7Sb:7*PjK  nHl _aS7N13Ŧ $# vn 8MoHRjsϮapoP0L{ș1I  Z]9GD+M~_!w6ɯJ9aO|(58~Ax7:~>t9"=F :B TzhӚp?$9fvskZE~ W"l5x/ۖ =fM$gMۼjeӍi߶;Y[?&..Ż?Ɠyؽ4^Kz%F>}>[GKƠOIEN V)F[hKЈލ˘߈OW+T2d@ofX?A ۬\\O -o] Ԝ G^#+CVS] ˨ uK Ր!Q%W|%5QUoG$n,*fh cF`_?~W/&iD֤S_  Ʒ?PBs[;>c/rSTO4~2 &I֎UlBP}!SA. ux^}Ǐjl2 |ՏOg{ee꘽Lc1DOGʪ1XEiؠҜj"DTX[愦O9j~O=,O5!9s A}g<=(|b j~[LGjz3?>[sOۮy)>_7N%f4Rx#`u2Gj؎ӓL'7F)B#ycT%=!{a+6Oqټ>_4腅Lw7X>ԅ N]nֿ\j9t7Ҥհ^ .#ja5EQ[/L\-PmA[bnx˥&3V.m2Un_{6J*Z'_;]6$^Ԑx/ WZc uٻ6F>$[C! .{e;B?-Y)d=Cq8"R3#W"g]jiD# 2MLZb\ەP'\ەSAC­Qv苭ǡGfTf1^o@KoV5:T%^e%JJ0(bJHʼnB %*KB]AW} ͂{U4h1eQD!#5i!lC4BC/v|B2 N0{.DD2 I?B%x =0Q庡N­ qUnBm)hZEWJ#;|դyJ ._0^ɞ'7 eoNp*J pj,ܓMDZ)|]k:Oo;I f1< )pa HjP YQBI%YU,#BV^ٵ:MLh.g:ㅌKj0bZHĵLBHPm]رFYԹRlM»"u^_=h`vؓ}zC:0 !}Ml %EkIoH2Zh* Kaaø4Å0~PWa^BdW `3z58zVG契1MK0%/"@#Anw"{Ɔ5u%#?|)Dc!-hsՃ-. 
8'\ԱW*O&_ߐ.n{Zo{}^+@83Xa kGH>2q|2} m'K5$0 ̀/v4b[%)KԮ0,`@Cn ƒq%!z  +KBߜ=n5B, !~sd+x2؜][<:,{>=}(Fr$(XR$|| gq>*L0]ͽeBlӼgP` r'{ F4\N7FN8)Q ~:^*]dy搀&=$wiK)J":sZo@,4%Ch} \Ih0@S\ h Д[|kJ>k%ohclrfjirwj@{5dZrEO01wIc =$'(i̫-O@d-5ݕP]]n77~U{? G{Z ςi`ڭS<̹}v>(}3xD`ܮ6~7)W 4ߪm PyI3`d)N3ă9s!WG"VWd^`3s6lJr"6W,Åͅ%wao~la0H #M}y^_q8^YX\YvGX^U 5_hç3, =ΗTox5s .ƒsby[qgv2y}~fd^MbX}K `_5/dIߙLGm=27NZ$TڏufŦ]VZ~1dnfq Q7SnF`^Y*  G1Ki'F*7DH|‚cèę$L"j/ 5?üq/ǵYP=D18Ad+͢;!Uc[Yb+HC\%gګYs\a(98\l ;an_^'+Ż'b9ޗ؆Q?bXIfu|=T_m\<[ ln.33|˗;+8xh^˛53&yq ";[z=pfގ9︍l:UxSTQp0뀛GY q2u:_vPurbg0/_M"x F 5I '2=tX JUFwg]-drTR7\Lo8vrLELBHS)rS xSO~@d4龿,z !p3SnGrUUS3Ko/;G2xY.(@ jev)in/޿wbt||hYSԫvg_ʹ{m/_<<ܷV4vkY^ y_q,_K) \!c<tԈɋæ /#Bj/LpItKHǻhtr!Y \ sߙ4"*%%@̇JH8\3tYs9fNw; v_w3ݷ  5R񨲿ϗ36_L/9/R9r&d-jZPsmQIEGvL~K^<w_/)iqL<%džr.9$I6F-hQ3Ny"1ERmYݵ{i_ϯ{g'>@v?(eL@0A'R4ѨGbΗuJ7,3)3T%9ʼn+'ڗO&$I+ ؎) Ѐr}\bEGbhtTDWrF#Q uv5r1`ʣH'O"Y@%2"F_G@#ns4L5lZǥIsኝUXQT>s>O!T?@4qHx{LC@2FYr,)Dr %BAy·( Ԫ]ĎP9S}<Ԡ՝U58>om?|&QkD?㣔> Ca!rЇ "2`쐿`$ [Ѩݬ:$8ޜ&j(!{FRapt~|v[YN6wp"$,K(paTz+r>4y/D>Xz?hnJBLɗVO4ˮagdžZ4U6L4EZ4F0h:pfSaqU@dGE\\Tiev+oٵVyHW$)B)'L"aC;hl̕a#=W2eN7(DYZa$@XdQՌX DhLfzg*QTPM GUP7 mM^\iYy"" :dƢE)hvШusw$vk<yKA%3>LIg@},KP%Z%P$N6eK{m*lM&%z3u="6hb RWFK.*rL nOZnBf) k@];um!Q^Jyr% V"jx{ M@ .6QFqYva4GW皖U (.#pnR`Ü&*7tBz戤vAg #Rĥpp%qmOM hQxS#@o:U?\f DxԬC`!/=` *$8-J[1G`BO )e^ ?L q@~1]Mrwa\Ӣ4@0 MQ͜N Pd-_JC|R`4R`™$rZ231TiSyk,v%FT$ery lh<}ߦMhKJ&@1_B0 0nA֗pzc5 2 4G$E ,zsp3W8}u;[ն+ 2N k Z0T0:\>b;oW L򥙌| BL-UD/ 0sP Q\a|1iT5.Zl +nV(־s5MMaJr|_x2B0[dJ[JQeT `_'=M?e3 MJ Y:[<ۗNeIjûe?Ҡ%;!7`5ZblynaFr|sJB}I`Qps^?/E?tkJ{N'ٌԫ`DB|jg".99VcUEje%UjdBKn .8Z1^$<[ Ǭ: YR/V0˧ ; %ذ3H3ʑ6#šƲƇFr]6! pN' 7kM%RQnѩ2z_ Υ -qg1@Ÿĥ yDWg@HW2 -7t<6BZ(."'\עS!.[z۔o1j<]76XI?m>q`]ʧDϗ_;P 6T xP sqi0'j?Φ/\Iyayu/$)ZPa&kOmJׅ poY~}U 9®E!`>: xk3/_̀VSD*JolY%y]$c WӇFk8A@h"*XW(~)D#)/p5_<]K=5X`3-A^~O 7Іyw BqUa(+bLn˚{Qm` pf?So_KrT %&u;GۛnptϩI"95\R tK'lofvp<Åz~˯5Vwی_͸ˠ2wxBem֡;u [`YZg.:t鐰kGpV\=+'~Yc `,3C( PceqJ7:S fLj`pP lMٟDe{iR^"v}^XJY2[\UJLג1 F-0(mbQkǝ}I)`ǜ& wXא>>\4{pAKuvctnE(V T qܐO5}wU)ˆ` (4MI-Ry|?\(Wh8wrGK&;)+viWLeXV AmS83Ԥzs!f侹yrH8f!q9XhdqrYcc?wݕ+Z>>/\`˪.d5oogK ]B10oO)}P"O ag$ˁ3}a*_h`DI2^i@۪ PaY@S ҁ~^ 6sBheԌO9sI ~X2/vk7I놕ՇXqj³ZNIM,1t|'\]*<ŷᏤLLaY`zE9p)AxlyDAyfF`$xf?.;FծsFf.׬5CZM$4[>WTCq3NHIe>IJppQ0Ze>khaյǔ`G(R1H@tMOmYϹkeSധGW;R$B\¥T{?O=4kSXcM(Nn{=G 223"I>hiMu27uzWRKPNjo.-Zvc:2*:_oZZi5-J,J&_` a|K\.G|^2)[pnR`v@g}; c_'<>OVrۤ[ncQ)w9&͗Y.Jc7geno;/v%7-5.zP`OsJX\<ܯ*rQ0CkI3&_&3"BZu~;)?4[&ֈoM)ZU MZfy,<48쎎h0ᙡh5%6R fDh;j[sf5ni/H ]~6;I~6p}Կ&*RҸb8iҪ$"R$wOMc)k% Lz拌_\D k mO=w\es;LXl Trb 225ur p5 Po)9h%W]IZ)YzD72$U,nH^N hWGWaՑ>h|xצ溱>z:]~HfzŲe"cp"y+/wn?y¿:F ~|\@G* X{Lt|>eؽqMq%h&onS}6&[fُ VH3K2춻HۿQXg2w_w)sv=~zݞs9*5Ԭ!̭ adR,Nt#MJh-N4: ɧ149!桜MXqPf8")"겸`FF*Z /EûT cMݬͩtjX-/VeQ++("jedNB-Q_Σbh{ >yN cC u (Ȕ2I˲`XKSk8PU& qT zWzSi }/?:wJiuGCrSHd*#^; !%(-/ S EP2!ChKKpfBe(' [bG Z+~;FIt6h21ζ ,tfU^UksJS$EVST  ,@sqE☩YzðtbQMaNL`zJ-O1&Fd  c"22݇7 `4b #1"Nd$ 88jj!PBBk!?JOt[vb"ƨigpӺ>̧[vÙ8˖H#[h!\f-\ˈhi=?qŽ.^o'HQfZp:GD%kcEz#\a#H)K $0XDZUo@:D)oS 646r1GNB^[ۼc=l6a!1@\bCpB`U(v>muS5Hy,;*n4otyz^l|!oˇzZԼ_rǞtK:_U:)U-C8h4{Fٺ^bʣQ)WYO롌wL"Wi=}<[LY-t v;zt?\-&XjòD ^q)2^pT*.(Ҷ&ɁP0FyLVp;s*Tݚ]Y b8YVRa zl5Kv/Z2;$%Co/+K$O=onqcN tb-UW:lyzY9~=qp2%'}Nw!ٽv+ z)8gl|3n!*g~ZF ?=xl-* o"05n=7PK[ G*VzVqiŔg2XEb?Û74kz > 4 NPv2yM%e5gcќCu;{\λR Vc +<..48d-~AN5<2 9sHk D.;FcK i&{ˈ6}#]@m9x.a][o+A /[-0o}3XҴ˲lveV,evr$lU#YWЁ9f 莆cZx,>UH*jE&΍!GκZF[z ܆7͈5 &{WEOm|͋KDQº]ިћuO; Dn+ N3 &^ڶ\GVŤyU yU)&hgLcꅟ _{8׶c[Xn-)$0xd|yyCgnZjcD۝̛7fs~7OQvIK n<BŜU%qES~eH(ᄓxoñ?̿4'w RgSW#}&t/t/+%APm.woonW[ee:v.yȎhE([; }K É5}R۴<ŦYɲّ.z)Zv6E ;reflZ@.aC^5c^˶x➿{pUz :tz,|;(t1_kAU`D^A/⨤M[q;*Bqƾƃ/f2E 
u-TVG߰uH:::vver{}ps4i+PpR!E8,IV`%6o24[\ӱrTGe'\d9I_Y|PS3 w+2skfRkMH{7Zxw41d{.Ǚqq6ۭ$p%p@ S],9H5`ۓs w ޾ 8]87sZtn~Ƹ]Sc/oo<[4ϠAKT>>s0%+Wpht'"٪^aq+hua1ּp_R}oY?̛xkA}1tS/]nf~چ&L /<\7_BZ* ; th_ϧ{_o٧GgcF2y>5ZqT=Ǐˆ;ʙ٬]]a7iЕo/\tv^y,)|r'\/?*%qZ]4? ^WnKg Ւs^㚍 ##q@@Dż0E\A:nLUfs* ĒWZv/aax;УR.Kk/m Y'd8I_^nH-),=b;[g#}?Y@uO+ s]0c 9%0)"mzEiR͙3E3KKI҇HV0kys[L.!yP_w35=Zwvj^{7L OU&:_@:HZ֗Qt㲦m(HK+&H HИxA{ݦ3Ц T )%i7:$;'y~-.iy9pAtZt iu[EA&DO$ ǸJ0>/i>5~DTP 'PPl*ELr̍Q\Ep,fU\$+? @j"KMRVp^&}㣖w_ǁ89jzlͮK>nb3VsOq+9z8u6 $/sApĺTC9dPǕ9O:Xu㑤c_ZS;ohJ*׏OnVO='iSq`,fȕ6)$t3ϣ=-Z[xwŻ$qE] U`;OfNҎtHGs!oҵSq%d{EWprT$0*Q~R˿Y%~OxdZ*/ J=!T-) Jz?Gs31Zi-78?X̪JNV SuJ8r_b`L*eV8V2 I͋cdf|8-)<%wDC ^ $)HmlӤO{ 8U3SG`-2h<~2:hB#_ե)#ǒ929)/'gޔ̌%e97RE^80 :bzvL4-($k)Oi|hʼnا#t֕Ow@yb:ey WPRp9h;O%4(d Rj< M )t3B,=Y71Q|"Z|P}}*O*YFjT9oɯ 21(\5wV47Y|tց5gR`Zak%YTH|}@^E'j ֓@L\)4[6l_2/9Ru+[KTpWelc*DQѬ"i .e4ĥ"5,Ofo*ɄNTb^ Iys=+!MJCj<ƫrefFl^F/ϑ I^ R@2Ձsf܎qh68\cqZJ犈V޿C[-o&|R'L$e*8S*X՝;ARJfU묓;¶zBASPi,A`̵GgF*T 7QRb j[fP8+eZb&;LQ LLCÕH^t;X4r F9%an-+`UQib8zS\=:i12(.86Kjٴ5څg7[FW !Hw7},;9Ck`)Y'RV=?lڒhk i5R RȾȘ:}bRcF;.n9N]Ǟ+CKfiǤ~BfH"" j7.^|&|9T~Q⊮AnHy I+om`!"4!ȤbRI~hA1iԘ_Ԏ RG5K(1W`94JA48@Zu bp4Vٹ+r&*\ k@$c: u^鑪efzi_my2 $fU&j#OdK^p ُ*?{9Sy4_kysmVfv .D #9QUTb5<Rb^ ,ҲTy LU2㪗4ޞ3H WpCU7t%(-r kPdZD]p7qɝ1E) Tc+^ IyE5pnBOF\u4WL;@ "p"tL^├pų[ּrHQu@8jx'V 4mNYHƩmo+Ҟ!(]<@w[OUs(b[;z2AփG˂t4Ź1T[&4Ts6h}X%T6r]8x\Vͩ\e NtQz $Y*f%,=Bg?i)9'"sy6$u3NiQ;h_2[kf7Zʪz8j ; A,jx0mۜ )ߜ9J2FU:`o+%p.ioczxܝAwߗHۥC2/ :;]gZ?,flY~#;G\Us4pQ%ڻ@Rl2ȻNGY= htb@"[sn挋y6?jy~{ld,]Y<8)QώAaųPXdHeI@̧44by SO4cĽm@A(u/e)e"h[r=-r :(Ww2gY1ָQwy꾰F[CG`)OEJxM=0*B(&[wZ|oxp{+Ø_LU=vp}5UG'_ 6>ݚO3>DôpWjxCWc\>v*Gs=2EsIW\d hD;#R43)M(+UdƵ-e"CB)a02jo ERco"%*QPKmAQQUPێmWR͒܆Ԟڗye]F^VBD$787FZ芋7fN6\OQtFqvf8i[}2L9`=^4RT,7E2?^ 9~SkL>Ɩ*jL ׾"D6~9?%*/JusLEL('OFCLuF~=mV4 ;vvm24:+fȋLy!yq!lm)bQ_BqU* 6cP8J":YꭉG 4 Brd("*iiޠ{&k81Tό$AL8KߚKsW$03 y=uP^ivyV&cZ3üNtw^rtWv߂xǁ$4ۗBTj9@@ J 2/L{]zc4qQsvgZC諌:RrWlw@4$_/_hHY8O'i0BJ31(& }?S0U"Plh8O's\F&t-ւjQ]e%+c ,?2CF"|1H nv抃[ς?lv}.QYZA[/8יJA T`i(~% 5dT,BJK%T5Cj'mEaP\HˁWj}hRUi51.'?%YeQ;̼$9T:'iҶ4&]-UM,q6uirףW W>o[S5`HI1x ("MU"sBܭ_FTH|,f[s5`ą5lXe|BH`/1B0zz϶*$tw 9GEc VHkKPns[JFڀHi\@ Rg:w|.OsM' X UM5mԩM%Ҡ__>5ڛe 'Hg_y1SMu*)0e0NHeDJinmEx_I#I" NQOnmM3lQSYĴ 2:ZSؓ5[UKV[ɊA*VBXq%l9:f&KocڋIʷk+5̇H^%D щcf?L5$%%E"2 ұp)Z1's>XGaZU*+Y+NԂPsQ)tqP [qXMT,z:qD {_ռ.p˳ӭ'9`3]&ml)/Ete)R1+bF,ـ1hi^s&08qk\hC[}-1oưPjQAQZcRPmҏ"a|\Pi,qcZ 'fJ8 {M`I\6Lk/aSCx t?T BJ:־~}d+g3-~뎥o\1uKF[X$>*5jy :-rAmT.W9G8 iM U}#k%tM:OB k`S^̶8d6'݅ziakeƆ*'[mG%ƾtYgɧvӒak H `e^3 &> |1RQh̖,W+u3z^,YJ/^\}:;{D/WDA%7fYw뉯C\`}QsQe<> a]`sk{ufrq#Vק~cˬ,g_>LZf2j'0a6,^'lvw^sSCh-dQvB g|}gEeg, # v3~yLj~!wlO/Dx,g`# wL7$Ode>f_n%-JTOWHuqQ!e,/r5sn3\ QUEeKEn?o>g u-X:8GY(XEVZ֏'w[?ܕ qQ3-4zgzO-Ũh@rNa?'`'~774>O?_|+F:FpEWrl WkbPmPX/{fԏ l_ʴ7W@4ӌOD&xrM}~il6K@ey?!!ZAZ~np8[fYOnt; w K`;/DFs4dSZeMoEV2?KPmy˶*UOzdžfHQdJQYI2gd?I([dEN ɕ"'%)5aLBsʈ<HM2%.ZyoZřZ LٰֈDCjz_r}$7BdےMĀS"7Oݖ-l6j__هܓuu6:D- `ּ`oV>Bx^βvյp? ?lP7;5spxP;3>]!jIjBhȯ8?8Fl'K`vo/;}xNjM+:>mM~G6zA[<ù*03!%o:|ϸγ  ]w jYcCk]m<ϗ YȻCAE^woW+KOỤ A*‚gBT94Օ5ֻm . @lnVz_hт6_%kCAihAU\(P/I IM>M;zU6.o#,R9NqO#Il)a׷6C͗[Vrb.J %,W8#V*Ε͋J*S|Vh _bZ7c v噫S+Q O)_fUԭտ>qG4N݌gZ82`. . #Ňl/rАqHz-lz:޿|;m/fIZA%k j GWQMOwbrMV^\ ;^{q.}CTq7*wC&0_W ,Opn|Gb2gPS}mDžqX܎B?aߌa1/8 MG(ie+xe0gNN2N'pjY<JQG (9RL2' 1( (2NZjZ<͞AP i+_]e{ ֣j"[M`"a1gLX1&~hD vx m`ȌsI2Xh~Ie ̓2 TL):`p0EJ>˥T(9vQ`Z!Lpi[,Ӕ3Ϟ!Ưkγ'5y[%x0zyl\-jNmMկo&#a #wL]84*X8{IRL+PBgN%^Y~c6,oG7]Kı~S_9':OO߃M!5fu0' B뫊ŻL;1HIucmbE+ %bՊxi%p8"a%V$-Lϯ^K4*8WXi fy5imՠjЖ#Bg3 bPNS*Kw<"#)z"H/Ø u$k@Tp5ZP3Z{]LR`XjB$ak!y r\@Kg. 
Qd m!}bcp bܴp&gx`G>TQ7`Ұ dzC>zP/yFć^dKl=5 P ǽo{C!5,寰ݴ_prV_P9^4hLyezMJӡuLv>7iz\(nX5=JT`/f䢾kiAx$xU2;!V#/15Μ6歼mZal/"Tr L ;ܻO|'`o{q`ſ^Y9'ysL2$KB}{6U(8ףf>/5b1G3my=4(wߧ @2~b~QV@`OD;ӂ?H.=5z,gnl> Kl(p#>0 qP_9eEYI I({ZjbƂ.v0 ԥ0k-۔pd1f#)Eg$x JS

!gCȑJz[=:݆.&h1ζR/B9x"?Ά7{`P I;m##FT$rQ?ꋆT ^C@o3boh4b. :ޤlsa=L q#F$AV:XUc4C2RePJ!n[lo:7HS@#2FŁ| 8k$1sVqitQ{ȫCO~'6O@<tЭ#ݘVXO0Gkf^?u h WpmFvHiidLJk'^Nz^&riݼJJԨ%] ;y0J5#1cB&Dchy~( QfȵǸkӽu|l:[IiI%yJ)ZDm*&)sP`%ŁHăCp Bs\$wRr&5L  M`U1IĹ5^_J$3)׭\KU*ky\bŤ\_4<ay"t{µ!2mٟE8Ww_T9^~?}g6\HI1"N"c*n}7c및ܪsy W7/b$UTJ;&1yH=NVvu#UDJ ćY7oM> A6er)鳎 cQ>% Nɇ^ll6N '&g"AH@]`mb'V+&ά6fN02PC?3CҘl'vjJVƵfh(E ҌrM좻^ ݃{/=;ۃ2*6PC2QVp g8/*kA㓉eKlb F6`|$3YvKu;*@yȹ&Ƥh# Nrv^f[dNEEgP1b0Λ.BSl߲[0,26S@RKe+ήm 턓wbWZxx[1.ڏލFlT`}'Ml|dW\ܯqlξz1;CĶ{n&'{i{iu~t:Կ2tҦbm:8OQB5tN u4j`'1zu,tKczXnCΓy{ aIՑ^{.^?/`tǁ _ےcV68zHLnREdHQRV9υr?kF-ŻWa|NG ^p0aJj͝A푤R;rX 1G2$ӹWh.Y7R<)NB-f5.`BR,TFZ- ltTxO#c7y&:鄲H w,vμ$̫Knr_@{Zlue^5B)?Z!\ro(f=՞CTQ1†qxaڨ_*}nc&,69牀 0.p)vYpWnz CEFPJ\6b&WNbؼ*c "26HM= Jf4r֩LB\`&王]Ũf}`v4gsYw^HZ}Yj#5\:6Eo 1ts ͥG E)& Pm L[ˠr]tJDYz)ϑ0ꑠn%@BDJlj t .&(P1mtPZnj"xcHHd."TC@M P |Uk9YI,c``d1(ls> P'9% ủҲM%;]Fl35b*QJQ24}y-3Q֨keVW5mA[ڻLL,*YqQ. 6搼'T,4( lu^eN{0*9R e k;[5@ `6kHL0܃PQ%y'q,Ut\te/0g!h/ظiJI*ƒP1xArmƿԤ!d Te $ê,!qF䇃Q%ǵ]r~M2mas| A^+p؍"0az%f:y/_mAjDI0xd>J>xm@Zۉ1FSF䒓(GȅDƯ. Ue>"Ѝ) -FH T9R99܅3SC80EZ(H-mU/ 0un( Dtt=] "RrgI |2IPHlLqB*UR:\R,çtTyO[OZi|,sP(܄RumTa; 8i&mX('#CLfn+9#*-2qX%HAi> 4UwX:9dusy-i os!'K ^ʘKI.!ZgeJ44S t jh7noV5Y9{b6j{ &Yۑ2`kbsm^C-`5e@4OmXUpE7j$?\h$VOT5k5shҵ ֈ}mZTq^(}jP\gv A 冁"8X†pU(h8:iTT)MM",* Pȑ:0a5YW : /D Fz\z8ȼAK6V@UIͭG)*[Z/07 BIsFCs?[ea;Z;"j{1LUWKa5j.A7"9]:n7x*j*I<[6M*&iMKPY\vderBO{0?lAՁNo m3Ʒ:bXαn>$?n_ܾ};لUz892"KY 9 81s(B%f"GH*F qxގvxg}Ul?_F r@p*^sScN>砸4&r1!_ KH"!"&MEBrU3Qɚ\I6,Kӵ 񽱎4W)RN_2O8[IˉqBOd˖WZh]D[FK}:j{/$N qYW/$ojx2 ]NRwt "{[ %4nN&MjTf+h869)T b>v\j@To|A6|+,vS.͓i!a$ *. P+m)=i$\bO@E)duJ.0!X!Ic<8oBPb\LMZ>|o.@;sg'Hv ǝzr!/ю8C^}8mkv_r;OmĴܸwn#W^^fC)ftv&Vzz. /r(dE$Ia$Q窕穒+$a<^n"K~IPflC98O8M>s׽1\hbʺf ޼rRͺ)'% fGlIK:s1jC~Dx?+ݎB<t~V-n|d2Z_V_-왣] rr&+!L$N)VFIxG2Uk AxZ{%Z y*Q9l%o v7=*. w(#Zor8+*}]Y[vܳY=h=o `juB ؒ ډ׉{oH=)62};ؑ[v5g3%?z5ɞt"<7"Ӟ[BR,KZ~~e՛n` 1DYB\h(\k.y jxCK[G1#I>H*-[z0?2Z*4:}I-s]wdM7ioA;AwgB2b/QG`, Hfѯ±焃VxX+Ȼ^<Ohu)}Q$6N;O&9$S*k(tE_v`G z19,ˏf.I \:.O֓ί-ޑ.D{ųGrh>Hl [\7!m!l\, K8nHNn`PDTu4ͫyoJ/u?b:' !̚듥[}:-d^[[]4Iiţٚ7腓~3ofOl[Wr1/s y&Ͽ~:frY7$ILWs A d?am*-ecH?4b|ܺaaOe[){RFt; iDʸPefx5qHhryuKuՌOzm[yIhE*6r P+T!EJKO+jTgor6\>wE Sm]r웇-ZGe>~v՛!oR@ս ḣée WQLc<ٌ5%ب#[K^A8ZϦ\ɶ\ hxgm3ӚFW^lH;y1e;V 'rb7Mm\{Owܐ8OdQf+N=f)Ll[@n <ײLDneڵO5du[!Od2u#jl|ܛYǏ$ Öui/cP[_tH)F #s6$ؗ$q,5a4ٶƲ,K{f)JDKTٲF1Ȯa>Z$8\Jje`t99d ~q]\kWưB,fXI S9ZQϦf=~{k}C^]LF,uSpHY$⢒4:塌{e dTT=M{ IDjľC/~t˻܀sU<>4V)p\rni;QXYX 9T IH+PŭU2=%X -e PL%,(IjC#m6c}Fc=W!֨v7G ?˺D p)u!5(+eFb\v9.sU>҇PY3@*ER,+̤ BrQF8ɲNZhyDԄ\+e ܅{ AUmnoч\Nձ2ٰ5؜iW wʜPU`Z^!MspEonԲ =M J`B6 })c6[ⵇq0kw4;'~H]v WQɶ4nPjiw!jHGm>DB5*bPD/3{jv]n6OI uVk?kCQ4йepso>{vi_pkCU7pφ5̄gWt(.5YthݫU9r^eg.T_YӍj{dJFQDRjUaG]jǡFݍ߆h_cH-=32{>;#BZ2!QgRXÛBֆU~a\T-:#Պ.glp5-T6%’ d$L2?쭾V%˿ب &ǻ"iT$(+ҡ$;_or:jW#u@t2?GSQQB!IoQCU"%; oaѲks!d3F̒mjU5+hٹ rj5K"L݇5?BfÃ~.-K+~^N@\W8{_]Y;Y~ß?zmw-6X=f 5<&t'8q]c[_x#WZSD$T}j;/XueՙlqݡJKĂCIUL6ЄBCP[l&,RyoTCO,\VJl%k:Gxg\C5S+w=[K`JM*X,^fDF|esQaB5iΥ:;n:r)Xk;Z[0Q3wS}>y]-"Oda^#e9Z-66:_=$y!R 4d:֪sA91wnK tםke+ɨ1cN͏ !hEeM1`e7yx(IZH i o蝲k8_DmJЇn%OE,0g;wEG FS ֹadB4tLgDx|DƉ ˧Q)Gc%$f*avjXnQtbw߉ Cu߾͘.?Amȇ]ewO2q!9 Qr; \N{JXxCrY ig0?s(s:3xZi<x7 3 ]WƺNstIlgKy! 1py ׎a18yGGkk ei6jO&^6} Pf C?ㄼH>KSQO#RY8p$r2iJӌ : /_2B8[mA#aǟdV l;>Qҷ'a?z6[QU{^}Jُ-NEu_ʜ ~%a4II"U,*|LDAZhlz\*,R䙧K]dk6xM#J6;Q8Ǎ#lAPR5FsԤn pW՘J`G3~כ1Tڠ w{G1Bl7ZBq!0W گT7uiACԃZՋao4i/{RL25leHR,RځUT1‹DA8uooYo6Z!(Us=!sr]K5:g2ӟ00&I~JdX,")cHlznWI/?}0u ȹ58w9VC{qsua&**g 0eU IK/~[M*8X㥈z'Bl-FwZ?dkWT,Lխ!`Ĵ ?dAǁ-O5'*#`X^S ]1x-@tJ&ќul^Z01_W { 5vBs%lCUBu\63c)s*]W/G/c([),l lQ Ơԗԍ6|$`2~k)!/BU1ɵCQD|_ֈP|qe\8sbĈ$7f3jc`+v{_b=7 zIS~9 hnW"]GS6L8,m ktou%Z>l2?q TU R\  C-pE!P}.m(cXsfUvUיlnQWBD!. 
{n1Ð:gS?s_67bOүR;}÷WJ)Um{x% 0INmJ;G~833EO'{e6yb%$x /eybE$ L!,d̀}%|&LeUjkѥn>*|0贘y4{0*Θ1ˬiM~3ʼn'kJfr&k爛@St؛s)Rw{ wL.vR]Nʼn`DnHЎ@|#HAT3G͠B3{'D0f2AKp$e&D DA}@•v Iڮgox'c3"нtfsçKLjH" ^ͼӄv,SAs!c=yp(-h0Dj$˹&(ŶHזg%y 5B#i,A<`r[[2(k6`@ٜ5ӫ:C .QӊudwSjO`վVC-1U;{) ~o)Kܧo[ܶ5o[Y 0b&}[lT D rQcx/Ǔ%zL )G+pEAJb䃣 ~l'x{lv~(n PnRdqu+4 ITCQԎFsjVO 3ξ}l$6+Z5D(aT78@/eW `Dc)u;y?ieeWm"ppg)f;VDc9w D~(}7$87qNU#v"-6EK)'zpd oտ|w ^|3(#ÓMLka ·ώN*IpSl^wX9!V\wP ^v?}vwqѕTՉ!"#(#K2b6"2-3 c-5?FƢ9K\WԹT8D5NnxgS8#ĪDfw{v"볲iZ,WWT}݄jU`DfIzO"*nw2*%~zSXb^: QczwU#;ʯC6z.R-q$/K`uFYr;TP2kS $\davw*uNzjq"ـ0sz4.p4\`^:% a 7-V1)jR V4`]SӚ/R;I{Ƹhw k%H-&e$4$C.Q،΄}j=$AguBJ& xWK{ =?DM+.Rw/Z$뭇$/PLvц/q |H29h%{6 /{Doe*GώZI2 ݧXOZ?rv*c?#s|׽m=vvo%&xW@.q|=ݲG$]ʑ^h'3-*U:>AÚEs>jb0{j<9NN,>;81֘q~=LRޡt*ZH|ێ.#P,'T,yo:ssߩmϒt<\Sqi9gq910f8p9#*P-ރk0w#IrpLb=QM-aNLmԈJˍ JgI`*іO~Xpu C1rnPn7Lܭ(_q&Ċ`GoK&2Q&r(8*()yGάͯJIfceׯ1 uU\a/~XB9*Ʋ߯ƣw:,'OP8W ]yH_S.MȘHXAQ`RX39B?{4nZq3e+ˈV#B.^Am%9w$6%F d@$I1PfIR6])n;nvN 80$b,x䞘xX ˑs,X'$V؅:jץⅦcG㗻)KF7t|%;s>["(H F}Fmh݊_Y6R C4V'=\(•GYxm"XH@d_O|0${|oLH"$0'qb|lQdU[,a5XgL-)~AhPX"P.2G֯enY'ɛLe&.7er Ed\Q4+X.kK60꼵BwA\7W4z ebq?E2W]oÎ\OjOx`N*V4oI iB^SZu 2aLXL&I349)Es'ܙ`t(X;-uFTw{Wڭ)7 a$cy 9# U ́ 0M-I$Iؐ`8OlP`#G 1m0#$ C>Gs+[rH4osIYbHEH3ނ0!Pb*pN^h60&Lla!1 Ie!4NzRd8ri$@Gg $)8ky[:SD:ƿ)vxw71TabĊV*E IYbݗ/'*@黿~-/PrAZy QN-_8kmN0]K~t'Ida30HVѭ[rD{2Ad)bo+ږ|^Ρa%}'_)xO_uٱҏ;ު )}vꯠ.$& !,׻ I7yQQfjhmj8 S^a ,(.+x>-k[pH"J=;bi:r ˇkPq7SwY ܘز,86a2IZBJcciO`1ԓ|܈/+-@ۤ)Kr1<:6N2KeRn4 IEBbKiY"2r|fHkPa&@ocзf4-BnQX,hd @w]Z+ -L&ÂO)"e FiulYj$ \֛"OqJCĆS2d U--nRf`TfmyJ,$8߮nn\jj,H* "DȌLd X~F)֘+KIKl i_6a X SsPPM`U@! 6sFY0Y@G{mi!͌kpJR.;\`, snjIpIS%-T7' '2Y4- Wts6hfA/q垦gT4>V9AFe.;*kNX="ZXh e:wCp2 ㄰1q*P_zh֊{եBL>ԥ7km>*S'q\!in7w~T/4@9dmtt8y/ &/ DA ]T!Y,E{J-TD/+EERFt8q/X Ϋ*5v*HU ڧ dO;\l Wq~iwGžP}tIiߛ(cL(ߐ&dByC~"+$l-oq6ιu+57_P^Ξ]S[|[]H޴z,IegnaomSP<2uxs^K"W6#l[xcM<}Py^m淋k\5탽%4ȥ7PܫGazoW]JB)Z]o'}g-yJJ~mMbl].'6EcѨ~}mfaÓ^ӈ&_24\y1dъ미w$e!x}˼ KdFSvNEr|/hZ0=Խ"n)ؤcu C&iڲ۱ 1iOqMh7!n٨lmwX;wǠ΋qh[v~@G2vam}0ww0yC`ޞz(4&0OXڏۏf4o  17tRTB+p3fv'kG EDTe-ZT*H V:5|BZT9ꌒ@]'\@ϣPQaL-8%MTdU.˝ҥYBQQ+9R0&GFI&;2S!%U%?q1jG,<#1D1|8P9x>B&Js^P?WBflǎ[8:&l`tyPJt.Z,M;k^(ŧ.uquUg~ZdY1}OC֧{!,x{\_@?+Axn^C'J$O16`$UCs~wrwua|x |'ɡbcSC994ٵ{T{>}/Spwഺ.<+1QqLU=۹dU-v/m\*TK P8uz:(bv.akxM.`w,`m֧q_yDӁ kGiXo!緃63 dbf5fvSϮN<{D̳{ъ+NxRu~1ʓHrc.$-LhN'-cq҈ 254Gcbp:Q:Ւ8pn~v&s]zxO^6U++'d"(iCO^8*ϩMhDJ8@WXQ9؇椭pjmO&mp뀯r5J[WWhqJSJ$ >)ڲ3k^]`AwVb4;n4O;4tlRݚޡx*)d[^Xg\~5@ZKMoMNZO|~)B 7vWZT0Կܼmpցtϸ<8𓭪 >m]plA5šS\~" -`Jٽ =ZOG'QsFC_*N'&Dv߯<^dg4^F_"mV`1 >mu[`col< bb@hGs ^Sd?&q&-Γ5l30_H|H|H|H|]&nc92ܤ2I0jb0e_ fIT!b,fv~uNa ޭ@l4Mz/@"߉D7>bm&l)6liRpP?D =wܭ.TG҅vmV-PoL#pXnb˲,F >NLRc{YX6V SJy@ҕ`)޾kƮ?K?Jsy{~ۥ˯?_)4ܵqXvGΤ>.gwk۽7u)<&bz@~{}ۇ{u{ǚ`#Dkpn%&fm X/s$eBaa@w߾k=!Lx \ 4g " $0rN0 dY6E 3`u4$Gv)uOk!uLWzcbϰ*s6 q#36[a 3%N30xZY _`nz JWN&YU_yPZu0Ѯj[;-|.ෙeeVXǝ2+aaYQP:?88,+Z+UEE u7#lW A{-/u$_fdbkP#VyPJWJ|w?Eyj- h94_ nP]^V0͸B᪽ȚZQuU{n5^5^ZR'XDz!JWRspJwUbEſW3W Oqݠ7_mau:RfR?̃!4N4V'QW AzẄ́PVW{_n3J`CehT۵뭺sv.ݶrUj+ߏS=]ߚhڵ#z /ٛF߾=9+wz7+AXoS u5T[&ѻmM[CΜExJ-/4xmh:l;)8L_먩BpșO/5Fbh:l;)#G_;tjjr,Srx/4zpѫ/uwS4]Ǵ@_QB[ Me !g΢^9+kUֺ>5第 tWY*k!>ʚЊuOMH'^eMRujU'XY ;uWQYzs*k]emGWYS E*k_EeM)}z=k]eStRY#tAvjAAo*k_Ce dJɓ"Y*k{8?!T㮲UHtKt**YG$)"2r?Ǔ/o~xݷEFi&$Eg4Ī! jUr<)zW?d2$ǥ'SKUp啩 \ Ƥ\9½ *MXJIhjL4zOBZ BȝDMS5Sb!TQ 98 & )v:O. #岚P[J?Υ'X]ThP5&o12MI.~u{׸aV?4ԗ ;Tfr#rdW+޿{كh:x3!U\ ? 
ͬ`4]x ?ܟ&anDvqsW.Y0T"+>]w0nx& )yߗ{aUS؆oUߋp˃6̀yL u'=M<}ig/=ٳ#}FS*KɆb(b{$#HtbMRh) $8:g-S-ej퍈0sγa%PW7@ڨjE滊11.xrN'-0H-g|̫w*>jr~v#?>WR`uZP3,*7)WFUw?w+!g׵-DZc"d(dMM۶)@X!G޹@KCɔ%Q,A@48Ino㓗G͇bi8{ m ڧbUSQt.C3 9jK7gߔ/!$\*+gK%)-FY*%e͗>}xre\꣤LdS嬚4K;P*sK;X4:D5'ɥE;H]!o] ^!+Ivp4.ZJyS~{٧i2U-VTm;Q*'T!9| %LmE2"@<\۝da+@'s[5!Sk\  0 3rw`j?R*_{,K>04{F-"jFa (_ DIŬ_ZB#WNaUGdiueOXU$*rYHHM 6!J{]Xȫq\; #V1~tܱvJ&mWNJ; j|p57qk ipDiCIq4LQ"#1H:l.56;V?g LC*$MF;'0 FPLYND̉Bڙ<歚`sFs`$=g L }t>X!IId<&0k 6; pK5Fk)?wm48w, T&Oj&Fid c $X4ciڈ,$xTa @uv c; +GI\'0096 FZqdau0 "7]Dx$G>X^ﺅaHhpXY̸:g LKMBkT Ss]$*Zi  qDyű U&HcK)[1$wm48w, yp6qiX y\"ŔQFjb?M#!LpA*'G29 VKclchƜT&S)%! ü >0^-4 ,C>rѢ2clcei0'M\'0i0Qo ?O( lmCP:2bJ3BZsFs`uPIK>i'ɉl%e85(N.,Y7ww]-;^Eѩ@ny^K[_  4%zc4Xk G>1aiu=z2&@f,D)7Q΍Bvn1,` ~@ؤ%!w]~-fjyvp bUMp?3(6yv75)S?~h)Z$!`cA =^ЖOL2@w=k+.e[öj+8@4_"ظ5Bac߫A/e *DvҮ |RA9aZ0N@<]@(  B>jKNLH;T&r Y\pFsSK)ITj+=1+K0욭-DĢ7L""Er[4A"5![$B!$EriK-bayST(Mɶ}6ӘlKhLB!} DcnX#~:Ͻ ^zS L+TaZ A!6p*ASbPd`Sъ5fh{fH#Ɠp|V-/͠*ی04%uaޠAHW2Y ,M蜰ZnW.6qw$6 Cy5֙YJ3 ގ?߆ͅ35~SX@^.§b< oA)?8[ȯKasy&>ডZTH|f>J?NxUOb25yg*ͫ^2rLHEJ @.2łz0<0//ϋJԇ˵{{[cYHfS/G!)!. BbvgmT|dokdff6iC`=XLq:L/LqzȎ{X;OyϬ}hu i-#D UgO 1q >w (ݧ$+{Ǯ˺f$ AR9;=|>vrӿ #ӵ.9J*ֿ4ETXH"U X}UZ-y#FHJ0 8uIcR $8ol 6k[ԀǭQ2D 3ʉ(Ǒj:=*zq W`'kmS*- Rȶ-! h<:y[Nwͦ-}B_f#J,2`jFB*8^"f&0cJ5?n)TϩWP 5$]$]?{_S KP_z!O} Ebd>̔yˠaݥmMK"x=A{ e{8_nuZ&ϟpۓ`V5΋YhbPiއy~boeV(-~ amz)|j:O Nð܊$CH O֋pq Om>S)@0-`ЛY~ǢO鴼5u](Uzg [tʏULMͪL22xc&s_=2BwV oҒsQ }<5{y!_L~qnMebfD^t{ r>b;`O}٢ZOu6jmޑ?g%fqD7ɖp{WƑ Ycy>,0+aFf51UER-JfWWfgfdh:?{||_Hnv{V  B+2ˢX(gXtRFcE΢n?Jֹ(%Q0*(7oɬ MZ[9=M뜝|xOM]e)N2ւASO Xq9h #i,S):Y ;/4&Qq-Ofw))CNS}d .X|\Cb&GًW @3LȐKK8o5 mli* 旮Yණ_EknYnQs_MRzтLu0:̫7=M0v::ObW?y$BWso}?DPՠ"UVWCz:DwMOj8w#}ɔ]䮌G21_.6"KƖQ\! _Pu-<Ю)?\vMZuCxdނsyyOMe'wA Ȑ66 j~ƊChal=c,Vdz0X 3l̂sn<5v%]U^Uc׺E+h T&9jSzh* #^I.1@ԒXZtԺJ%)ND _v>ia7 r7L+}ô7L+}JX<|x~KÜŀSK .Q(E$jޖݢ?o{O&ecqB@bufs0# }ˬ<7?Z1e+גK+sk^'̘VjI m-%^ZEꭚӝvasU'A>?w9N|ZƤ%җm 8dYoXno47WONQ^P7÷3Y ̥ D.α{D%@x۝$'\qZb!^#poE&_Bn:E.> /+̈́`um%ٚ6tz(b b)Vx2QZɬrL !g 5K3E62ו:ssyYGF34 ne*r*[W_V{(qOIv $s(jqTꭈ,Fq5:fI_ΚVˣ'̱F9l'b"i\0A>MqU4'o9?ؙ}'3IEztxϻ_M֝x_`ETC@Mڪ~iVSsR$;yEV|^yl2J#LG(hh "w0/?ۻW@RŒ,<??8Bo mܶhP9ny-뒗ozyl;FXcugfi\^lQ$B~mӁ6h&.b).vJތRo%(")JҕaJW)]teXMWVy 0vDp!,XOuBF:↱t- 1DӁYiVoTԫ TFitIW07y@Wy *rW^ؿ癰L ftd9N8B0D$:E^FH#A"ߴx*!놧y~pN,mj2Xy,I0BU9N~#lX$ҁ@#b*0Wa8qY ry E=mPy_f.@8Ђ@HpVDCt\oM*aOô>!"Jߗ\j#醗l{V#!Ja _߭cJU8U$yt7CA1O~F _!$+r Js0}uL &a6R a4{\ޔ6BR!a f eC;Npc ap'D]Ys/N, ` XpNFHłxi4#+3Bp`iXZ=}Se)KA[m+}r~y1|2wBh^QO(5!p#`୦zG9f@$`^iAQT"^AM a֧ }e0ۊjT$Nڇ#:M8W*-8c*RiL(OkzlNaIāJgBSC B!)ol[qHRy]fkn 0RA@%"!϶C>P#/Q68(9pkCT)j[ :{šAYZ!2_Tm1\v;X'x*rR=mQ+R琑KBWKd ߅ ,5HIDž⤝O*)ܼ ]bdڤ^6v9` ij0g}}@#=7(7{>F[!޸;bQFָ.>|,M *0ɣHC5T?%[8,Mʢa7&C᮲êAo{A"hNIHOٛ¬kn6{DƷ r`Ƴ_=?2=^>qf?$4T:1|ӄɊ#J )[T/yW @=SXPAph bg$Mvy9ٟt>$O, 6M5Lއ8 $;Sql<~YPwR~|KD! 
2AS0U"Q%bJS=O,M2:jnE+@ 2%V8fQG H Dww6"IbԆY]0Ya-LXՖli9=ZRѮz!uSL~nQQ!:]7gft n iMk,޾:.W_~.=8_Ext& =c|@@OT(L[֟{l@4U4x)_sr"b+ 99‹zH(MOkҹ+* fݞ!5P#TG gyZ iܭ^z5x[=XLjqBt|g!C9?iOj!ɩnEZ_ݓQJ{ӠGTMrQQ& z+*jb˭ȧ :k鵜rn^)0)0)0)鰪՞^:bKOujJcQb"WTDO&}0SCU *x~O5tEYJQ#2kSTzȑ+e@<w\~J_~$D#R&(1 cuVṌRX`Ff Fo}x@| àY> EӰ<xWcMUfs*Κhv^ 6+.RI0Y#T%3.Mǁ+ĭCHٜ`=K1b!mF2`ÂUjF㧁` 1 GԂKylPfPFEBTt2 B$3ZéU(c`vR mPgPC8f)upS NF [H l0f$S`pɉGc)8G KT&@4;WZXyP-hNuZ+ @g#MKcj]̀ADY4UDmo1Γ((lCN<"k!^f ?!KG+:i^D@p\իRg?vW!0Y )BKXYg=͠_m 99D.UPrX~x^0}Q [ƇY r8{)#߅hfwѯ S);9ƌ~UGw/}5ien8]=֪+>q}!=O`-!_UƊGc|̍%FiI5,]wZ^MʧMUGߛT#GH, ЎIn7J_D]{Wɥ:ogf͞O^#ۘuc8|H{PF&* %~]>PomNGx|; ֟ ֖CMWC5罃[b~7b>S`~yK0 sK95׆Ga;>C!jmy?Ӿ+v '™̚MӎlMʴ_\nz["-$]}ueǜ;EP'xf;>{F~[ ֑u1<~!d[/:zPB@4AT̬1;$9+G)l?AKgr>5¯u#417$>ť$z{~aFږtO$ OƤZRp(if*\-RXP+0H $>y.Z7 @ 4HC%:C]@PkGR_k82Donc $PcZF6ghM>JkRdGg4 x&#*g뾪%ߗH~`;$O5VmOcH ڮ JX RZJ0T!Atڢf?5 ^4 v+EuZzł'VUi{U/U.40'|E>ws0ਊM҃gA$`XR7Gdo1MCYH j"P":FʵQD)#Ppse6;- 0$3\:vS{XƷ&^Q楙h=9q988v*NX yxEL+@ꏿ:!\L]ь N+F E8{nFwK@E!vPZ/BCkؕdsӂ*L!;P'Nı9gkuZ@j]iBils3Ƚ4/z uI6xi*}'Ԧvr,ߜoT_mVzfnQV>e;.>3 XͰdsHӊ)U :~u%8?s US7 &q:hݹu9zhkgwZd L hTOhi޵ϸuSgVQyvA4 ώe4jڛwk,Gn _Uh"[+gI`R5jҤ[>Z?GRtGN3R;?^GzKik j=>];9̄)9w3SjN! uWRg 9a]'uCPAU>gad2[ +BzΙJ+F)ٕ4Ry_ ik_m-!qӨ(ą/3qP;@)~5 $^y$_اt_ħ#{\>yƳc;W]Y8c79( 02pǜ93ҕ/߇^yd4Ԡ kDsz26 }Y~M*߀uCxo}a !5Y/'_"1bP|Bv̇!M;>8:n?u} g0tp2dq}9!JMgՒWZD'C Y|O zm nQ\aW,E,hZ30[& *CN2u`S׫?ʹT÷.owZKNaZ9 kc +bM+qi%z7+tnԪB٠7=M63U]j'Ypv̒4y%~=M<{- b Q]-Kzs7|m+GGǔ-ś;^r@;2v+[8!i8{/ꃣa͗9㐙W׿뇉^m}j+)TIC[ 4(A4'` roD2d/߿e߬U\Y(v~f̦ dVW])C>_e2qES|l%3@0LbS= i$W4}暌4԰]ceP  "ZyV5sA5%ZU!rE %)= k[نFLLʷ:;HJT{0$qؠP s84("F!C#14Pbo$.,z/xtinCl$Eb/'dBSlh{`MJ Pw$9GN4FJH):dѾPh=)V0zs߳=uȇ_sٞ/n4gipvక8[VʀʔTўY`f,Dhҭ ʝN2 0h? "𶻜@+CRzVFlh{`M&AILTv%N&Jr.}Spau6\HA䊠EBC6sì*ϝR>/ٝED9U$+QbVj`l{2L~# tZ B.٘}Jb`LRG*ZRATj5hRDDalRqt\yG+ tS2EYKDP < 6?>cWJK}?wFiLk:o}ݻiW_~ƸIΎ챩 z/A9c|u=w" g|d2b3{`1:k`F꾺ք+J䁢>ڧ^ˮi?ء}u"Cþ_]'q}{dJvK^oz-wQ4hEѼfZ2!y,}E-/ (g;FBruۍZZݼ4kFuW*%\5buf\tA"לZǴE̝Gl46Q*<0JRMFG>V$Ь4޹I 7LkjцEp.1q0c pd4$HC4)p<cԚX=*B{lS`b\ƴ|O"Z=uf6{uyzsoW"^1w$3Iy+I U࿹~_p"6d;M@d"̖Z!/c@C_e[6 ;!̮}r҇#"Zuȧݣ%"3 Ytt$;vΖ+_L%ƱX3Or0[L ]+t3+@fYN@9R6s2||9Y.w8p |54$%"ٱ')2V^(p"_8=Fc{)dQa$l0)_P>I,:{`\sgT0L%'~mBCu>uSb0\+s'j\v.~.'I2z (WhIgH)>pF[ʃ2S^ȝkLY^ ƀiir;4^hwdVI]@ډV] [h+(N[. _rH e,"LΎ%#"%i.55ۈVyI+n /E" Ř]lH&B.TI9+b4H)I[^R!UCNR{P $1рR^ғQ ,D䒩 sC.s+U"ĀWo-?q=zX҈1$ qJ"Ӄ#(W B5 #9D?DI&j #dR(H(!qC #yHDvǾǒP3±5%㦻T'^A`쫴~@m蹾&3fh:ȦZoe KzT=+pƚ? 
e,|ٲNfap}Tÿfkڀ?}B< ճƂ^%klK2yS '̢.)m<'9+ HI1H TA٢F9&{E(gx {RJC>KZn*0,NsP蹨6xcF`w |` هQH=wg6=&A#v-M,Itp7i'XFX)͍ z8>La[jMO췧}eFuaϛѷT魄gMKX{̋]/O7ה1VF_/߮nF~?i/xhye>Z/ \0lܴ3DBXSd os;amI\lK,;[=k5 [[A#LHi$|D , hFQ4F_D]Ԑjėq0XK}ƯSD"Rsyt_ _NmԂ#uϳ[xs.f?M0(+8W.aDJY)̘SNɕXOIO0b%rlZJ7ص<>>k{'wtM:t+W04Dhҙ1MxG$0TjATìa)"fx^'sJz8ZfHlFKowFjC!d" \[ W;xxzN LucL ILۏ%9~@~EB\mOQYG̉3* <ɚśIf@.w8iIdAS.qw_]}d8EWO]ٷV.wwf<#*QD v(Enl- e hjsȺlKX΀Uͥ5TO I$e\{U*/9zH6A#- &;b0sqw j鯠6CTPq擶Z9Ǜѽ*>ƸkcX^ (l:jאtH- e/fH|?5 j.k(SK5p\V0\{Ik ^wX\k^ >$y2܁X{P#IIJjzFM\(:fEVc fCg5o5봗 IOtȈ{o/4(p!B!z\RzPk.)@q{Ζ^AOG 0DQAA۲'GVzj;s=ſ8r3;n5]ryK޼*@x),t>O8%g:"&r2cQqe՘e>cO/$utHnh&NS)5 79K9=$yP*ۍż1ҌiQ`,%nV X:(sRUC7=[f^Ѽ31Ou";ۧ bok%|8YZ2̯}yvkM6kA[z7J#!LkY>zBZ8>hoT}d+ǭ'1dȩn/xEv\<{fT>ԁlЖ57EAWܥ0ַBշ~;72?#j) bQ <0I*t[}aim>scFޡu#vN6@Fi3T5;;eh+#2L3⚛ݝyk1Ь`͝{;xh E۸ݺ1knփhptplΣtvl!ɉ*,RFK]ZEO9ڄ0^*[EjR r7AD&QZ<#A'^r|)@ _؋4L[yΨ5LTfVeĺDXngc@*:n@CZ-(q;J4?ՒxgſooD ׻'~ظ=vZSwnQVg?~z2zZrTVYL;Fg//onԀw-wrfvY|r5HXwcn3-KwoO zjp2?&|+{لBeY?Rƻh@rx708̇a3gK.OGAvx͛: }4;DjdrW /C\}߭FmY!G1Ab%,o;x+!}B^q+ѓMUI ,S ,s:)>6YMtOkJ6z0}+q)PR B֌E8D!bRҨb;w+@• -(XCJH%/J"Eq/%l!JI!i$.-B>VHCK5EUݓA*ajbGӧa剔B]Wj]K (MOf A6)*} ED!SQ5߿{]gq&&>Mj./M+߭}5}<j4B"%FGbݑICzEЉw|dʦQd5.$ER/ ,:E(>JZ9g%.W 3e͆l$ 0Ϗ'\Xh#<2 uy-5e^p rc+QwHL є58k%F2I'eHHR b)# &p};{>B( Pc!$P̒aMfpi iR"gJ Ri sESP,FI9(u ¯wYNň2 gmz5} P"wff=]unsmq]TLZd9[aJuL殲68KALf:5 Q35d{Q9q{fJ5 ~zO{PMT6ʔ%0\hqL3D֙D0Nelm2N!(jXNsDSvfi~W'Z{K !I7R!+^4I&^/lq姻{ \Dqu &rݺ{;%)u+G7w¹;E2u5jRSJ!8qF ?QNSJp%L٬ҏ?#;Q's1O9kGJ[urJw/,ըysoW@Gk_bX;DH&""'!:n;DS?c@!I6EW<^'8 2T|pAv{/h~ǃ@N^wj2O%ɉIlt'pJOɱ5<15O5G)_jHg77sAX@|wܝ16c"3!vx|)y|SBΎzU3wUY<ތHUk0q|qŹ^ : ^e=pԘlBQ`軚wTWM(73:\=ZE]:n?=TQ\jJzѪjA,ZusѪ-'CP/+JMDBǜ$a4R +Qr6JTPq23yN0JomVW$+,KCf'ȸz* j•~-  ?*oPDƬk(虢@Yu:%N3S"q4% fgVV)8#+3`41,(M XqA-UeH{ j+`m?;>Φ .j'77~&S"IsUh>{W(͈Q.\S6agtWX3z joPSF T]TiIiUtBEoYhM| ]o[i]9UtWf O{Z>y9PBRts L+ysWi\fwe "\9f2mUp6l_`QV&4Žf|]qDN_)uY`efwf:ZタK+(q2˾\9 2]!6Lgdd(~EǙvTu1XA'IHd}?z Llꂚ].'LXe9&_oݲp,rJ9^|iKЛ" "ͳ|l;\a=V1#UV ^Il?o1`⌎T-6efےvDFw#Ll Bw04֤?V)/X[aѤA33MR64kEm,`3۪%U44ʰ_3mՂ؛TT⩖y6+/xVSiQ2NaXR&`2x8#^DXaL[JO,kdMZ_嶈d!7}"rJO/d撏gb|h&B% ^U^`)_nye1H=xI'SY'croH$n]mMWI[4KEG#x<~-m8UxKKH"keqouMzqq}W8hk]GwTbJD.Etl.J)A9nelB䑓_޾0tUe~>;%z5cL12b>ÕĄu GpUttuA"B ”,Y2cxt"ras{2HQ[Da,/?O{36i~~WhT1'd9^8Y/djO^ï[{sΞuwem$G ;a#R^V-ޗq(`<ǣqwV7@6. #A!̯f<ǣ{JG@N>"Z>y'^(&fGgdnŸv$E/F˶@+4R'BblXϖ]K PyXѐaqBX0U>kVM̀oImNo?tαA|p`=!8u$j ˆZ:w3'U([ؽ,ڝ~$'1nĮ5`NȰ Kl2W_>Y'-C _ŔUBu6$aTz)*" #1%ĕv *r+/ _bSk#N٣ٳ=e* w,<[|.1_y*"8s'{ïN%9wUcvJ޴vO\1Fz6><=Άv΅>mK\X笂3_u(Z{\ #m,GW̞c?堄Z t,0,{AM 9 $9幂X!e\-)NSM srGuNSEjIf9`3(Om*l#΂&H*Zߠ9$>ks7R/5nl=r^B.!.ߍ\y^urwt}w!Y5r:׹\SU}Lp]}T ſk1vueE!~o+wor4"(h, CQ|#^Sz߫5Hruդ.)b`j9wkK|rl\X.%_z-%XFflsk?~2#mGTa9i\#'kP\ sg낟9kt;I'DS! =PH>8}j[vl{!Hnt9bb+I*bw3RssOZEc ji2'ı\8BzEi"Ih9ǣ6Lj66kiBPQ=Ѝ"i@ j+9CeJ2Iڐ&䢰*aFlzA\Ji a 5%gl[T"qJlc{*[6ܚ*2vJ GF#*$9c[5W,ǀȢRR3GvLw=iIH0i"#?6FK i<"[ +oX NvA0wR$ L+kkṔhDi=*nYϷr**/ 4/ :gZ6 ~NVs)8\}s<3H9bRûkPEDuvC)? ٠oSPX(Rql\궸m;%#Rm9:ճX뚋LPfcy΁\fLyߐr,&~D٬@P{t|AW\c8i4H"hv`s *z[/@.hZsfk 菣Mg]gmX>]LoZUФnv<ĜބOUX>,5*_JB6pWSҰU:Us8 iUZ'qLnB| ӹPG[I܎I^OIwQ޸|_d6_#?7DBzBWH$HZ>*C8#-MTvH' [Jp{6ګ5I+?^)4XAB¨B: t+U*4-藲bk”PMI23r%xtR+ՃD04ssB!?_ZA A~Rh]}tnSf `&:8F4w*QJ+QJ%A{f"5 Q* ii{&2 98|dQ:W %c OP h_fAtnSf  :!\ΟaF+@GNH=ia# HF\hT1r\GES vL60"JhdA0fHbI`4. %M8 u ޢm;|`{oQL( T"ȼWɲ_uP 0KɼVYE N@[wc_IQJ<,8toSg?ZP1oW* s_"tjwc -Uw/3Z {1&P23?"Ϋ3TWۡvDF>V .?c|#˅ډDš+w=/`@ngyDLj=F/laz!{_꼢b#20)Br(^RBrc}/\IJ}w۟!2?.ݜNhj, V>1cUNG'6[[3/ʵDy0[?=:cv/u^F~7ʀ٤MĈ(9uh}Er)1o׳90(<>J"%vq$])M c \.}TKX+{OÄhwK_s}[n/~LbF N[ɟUnc(K[C]DsJE;e, ʯ|/yQon`F4 ze[Mw:@. 
|.V\“ox}8m􍊟O/KxʞE5z}dLFf׏@⏣z?O?<|Fg?8rXOhbF7x_%y^|Ph 5@:Uh?}8~3٦/h#cߛ-al/ʒtfPOҡ !ݗ];iTl٢Z4Ts$s/s^_>5Z}[+l_~S6/yut%30F/kk]XE<Ļ#}_~!/:bz"y]gYB]q*>/\D)m5:,Nff鹿o}僣0m#?l.("\{IZjNyco{d#Gy p{31Ȅi~3Qq1TeE!9LRJ.|!zHM[*@Ao:r f6)؁cƚ5N1QiHS]A)&SM8..:"ɟ6{2Ǽq%"*^=}0zowwW˞ H"75!vTL7Pwnx iO!aCkzB)p٢q66U Zg.^]b:ؔ*ygN7>Et~*E)=8OkǛxwWj6)C5Jy9?]YS$9+e23u5]R(,`X !3PHUfe͕'ve Ḽ0D]9[ ~KO8y&]ŅA^{JkԿ,?ԗ9h)`^Ո ?֪aRSv@;>02?>֭n1j:ºp˄Ț, z`!(om6NbA_{l5s?A?BwSK~ 4:#RPRn;2/88)ml8jI,0.H.wZdb.Ob"xjǚueU<qVMjzG0I,JcyqߑZՏnr?Qo}?=TJxAc H xt]!|3 t-f׮U{|auW0A VAS-I&r"*|u5Wtv[ I3tXyVܽ7vہRG o9̢FG̬ Q,6G)܁ݸN(5cPb` t ȸ&uR:ٶmGχ3y$z)_h~Vrv:VQco#oW­}Ac1{{m/c8g9ӑPTNjy@x3b ~ϠbRKߡ㕼ܛDŽaJٳƁFc 6݋AzrOviٜhJW8sA0|qo0F8hZk (;b\1sȩ4 5]^ :͹TYӔ{(n׍) g/^@3($ʎ uVuuދhc~0PܦYUQ %yhOў@|b< f[ b͜S w3&N$Z`m<:13'A Xlg?~^'"\AIj<"XݰĖ oaZ0%]7F]ND fӭ_:1?fknXϳԻk!a7r`a->+w=}?(@džp ޯ?|~Bz7!A| / l[Y}ոp{V m ߁*z*KNe tQڞSa=FFRʝHb4g #h@AąHK;JƊ N*i56uJ 3 iJ)$OgJV$c<54 BCd7a%}ChL`pɴ:|нX֦ 4/{ns(@(7zZ}Cyަꏟj xj]woZ*~~\L:)"7AZ44 I`Sj".iJ17៼ΡH_sn>3"𺴭mţͺyF z6ҹU'sHk$0O[!&"iDo_u X_j Mop}(/$0H[۪_hSDSG)u44.Ϻfsַ7-|lHPtΨl5:c՝"V(5)19g-޻Uf`V/.}| Ȟ5O ĕ`cU3j(m5JΛYrr.ߛæֶZKw:Y\t.HPєpXpF%'q"E`m1 -ns H1Je UP[dɐ~*o=93M HZCDFxb6D\[j6i0 \PU2`ED89o2ٳKYpED_M\V˽;|EoU^ѠBCeM|Om$yF$ t>d! VOKAeE=O:Í9G#wzS$F! {غA ; ZKܡJܡT]3\aB!ƕAy6t[Flajǖ8R8RR#Fts"Ng5Y{'70TUKLpL0 ?O1i2dokvockY({w6aUy#)$b|*PC0ZBkdܯ 0gsquIƤ)2Թj4ZTw4@bZ`q;s;$5+Y|LHJmIy($Br<j=Kt`],پɤk1Yo֦QjT2F&]aO8yn)v3ߋyy*qW_<|7A|'m罫[_ozYN@8@x|ϰ'HRIz /ҫ|^߬уwl_;n'ئ+qQ%R[<_[n3JViirr+wJ:ǜAɜaZ]X#]Z`l #2\Qԥ3Efgwzt4o3VrQR-rѴB zQp^[ TT*e(aˇq蠕ytdrdR8g5)چA#)KY4Me #2`QsZ6-.ܯ!<ϖpz,_R#"Ȟ5$ppDa᨜5bʶA0A>oZ;{czG0;s_uGP&kXE;}܊&mM e`iKpYA*]FT=!Xr]}PWo t :ĭ, sŽc%jB{BcIuCVaȥLlqΆ . `MXuaI@lr΋Hw2KiTA4Z-: 樗A`qC,p]п<3IrLD#UR8!7Y}mUiV)Z}͉1O)NhO@#pۆ-'6eJO)HAZCN06p 9-\oqYj1Lj0/N¡ H oۜ@stó7[BӜ`H\`J^ϵ^0ə*-Jc}t)t/۷U8Wb@Kvzs1n#rC@a)t z.1f"/2Xzi-'~ЮyeiyZ?V%"ĦU̅*\חQA4vCi |`Q|~i>Oqnz%EF*[}ZtkIH۾û@vihNcE'0챚OzUjYaGG7#yrVƃ&{؂<%678f #WG/mA/Vw!T1g-cv0p 0#&<=m./Z {xI:cv0z+(=bͻ3}e\=kޘo2Ru@.'3cZC0Ij9ӘE(KNFғ=ʰQp liđ.GDbWSƎJ[DYc $~wF\Aa k=eBј8#ٳƫN okc¹Z/@[<[9cdNH1xr#k`̲ ٳtg5=qԥjsLDYc M@\;P<XVZ\e]eM(cd&j険T)"ϠmxNHLT6dpAh&*fVL5ƝLxQ6hҭ@fRb6>ŎmS@h\80%MD#4؁6qȡt-9-d(0?9a>I(K'E]jQ051rP1Bݿ *hI]ڟ@B\ jU-Yj)(k^lRؒ\@j%6,ɿS5LKnhbnhX %tb"hJx.Y>xNOL2X$& rAYF%7.;/f #(11Jg(~A 'p~T3(9Akl'RFNr\w$ {_=jf)p=2&鈅f-{16+<Е\̩B ơpq>Vp>281T9oqPZ,bS&"e2nRVir4>6t<=S(L2Wg8%A)fD aR#9;EHH\ {xӑ5YT?VPPQs:,-&Yc }X |bE<3nSlHyII1 ԩ 류VqgBiO*L~T! a$]xNY쯻H,r3`vDߝ#˥kЎ{,hiY.! $K\ϳ\XhvmS\ߦmRKRZ 9Cj[[ٶYY_])c :<l!K"Ʒ3A4SGSb2'+QO-[[v` KЅ3GLN0LTmX#9B)ّU.$M﹜UuDfFP=b n=\t5QV[<*hX\QR. ' -| XaW>duInG`?=_M{C e=/r1Fa0E.* Q/Uڏc8 ~Ec YTa;وSѫؕ==gDtkcΎֈc<\$tw1͚}[R]]' Aչey, *9F}J(4p=klANe&AGxEnB9b9kr@̟Α@1J>ŤB=_ wF% cS! GaIIdm矅]gڹVǽ\c\.RB%a.=(&޵7q,9)=%$TlN\bjF$1~zVVY'u91]$D=zCĨ laHhuLNc&Q*5b 'X' Q W׹7{uR@oV*UNkob'9/se$~co, J?lmkcmT0,˞FN5$bu0p&^f\ܜ؛8eN,tR.:( -4MDX ZjHk_V'/Œ:Ȼs gf(f(f(f(*PVŒfdṔa,/Ӣ8Q\Ո6yZ(C٤ =ޙwCP"4p|] 0brx׷?餒:~ I0' .r^8/R7X ,DH3'6ya.{XUNC)d)']xjC6^ {`v1I(e%dXTqA7 Pb5[#e޳$}p=Y墵rKBx?\H0%J;7ge3.H%!2:hI70Zz 4hI'` 4, $6T HTm|uD3`5Q)$t8eBjf2ᬒ"5/!jjuP2m" x!2U*5PXsZ,҇ 0ݻY4LƜi@Kff GEqԖ*äLB0KX$ib"a1 k UӀ\-{sGCXta\u9ٴ#ђ2w`DP4IAuXQ=!pcb$9湌I 6];6T;6^Ajj)s0>ߑ'V&vJ dƼP5&T9y=vd}! 
ᦞ~zU_#-D(| Ih"RTHRGl"Rۼ6|NRo8˽z`blVNo;1ŸJ*E?H;5oTݜJSiw˩LXK` BD!sq(MY  5 %Um]PPJ;+EZK!~]]Z%c [Z:p-^FDwׂGM,D2dDv:6>%N`}x.OZ)τQ..U$UBZVVvṳa5^F_rB%}["0󳂷g=e>qx2\R!;3i L % a5WěCEVз0*yW u;!p޶GƝꃌb.B 躈0!@] UO1OUmpa7wX:M>q98O;:}T{; O:v~x=6_w767^lڜCgݝݭ7lZf/oo7'8~1ۍFEEo/zə?:[ľ_v{}J& -|yyN-oi̷CnkJ͙Bf8o8qq JgfI9$ `ڙc|N:N ~]-Qޱ+=`4< #ٯkﰓ8+*'ugqV(L{'ΏA z8\gQ<@4mcǪwtSO)OO}?ǥ;^AOɳo?|7v!@Vz5Y>QoR$WN=G}PSOGgf _/x^tg;T.l ?m`}/ˣWp8%,“0SƩ\/{۟OS[<\Ilq\~u7s=:ŬMa" c3%|LmĬw.,7nG㣩hL1}>Bib-?+|캢# 73EoP᎛#и FID> ^rYdH[O"cEk$d`Ng?MSBUm\~[vb9fT e^E-u4Ebp vU,S`a+-ӽl f{LL`5lwaU u(OQ?Eei͐8z LV3uO>XvkwtGk#Zp:Y+HA7kܛUpwڵ.YJLJh"8y M` A`aI;VѠ^N9nbe7Z'D ɧq@7N|rfOAj'&|*J) 3%5 .+c|5Ze<'(Lt᜵J#*/I-ZJUf|dֹa%@L:>: ֛P,yDU*QIKgb11F0j 0˚E?yEc}Sh $c$j$PANa8@J;~BD ^kk+Ni\0 usn`Z9|?X] nShZy0Ix`Fh8*b#'1 -_JU_$#6+"4SL[CsSOPL9#9+=$,'q+sXcVӄ5ޣ2cc5^c5k`w¦ R2̊d-5Ʋ!^%ޒ)Zz;^MAK1U4͊g2~&ҭ)do4]Zז` 3Ek/nL'.W3/ l?l Ґu 9bɯاhu|M ^<2V{i U: mJp^v8O9:seoqL/M[Z L-ÿ?>@Ez :^? &9II9,iE-[e(*EkcEw~/,@ I5dwUJ}HMURȝZ8zGZԒy 1DqqsY [8+.8b0T*Y] R38`/9 A\kYׁSxwI//0$)e5K1:):>7޼y=vvkèuJG5s1ↇ 1:aP vwՐ6wA,Z$vV[L"I37*Ĉ lylgÖ*qKTOr6 : <$֊FP5 &IFŀV\WJtZ%+Rhgi$P>zq}gs;l(i'tC)tkIDC?j6~sFTi^{ۈ}Q%)cDX=̮tΤy%ż:mt0|JH=D{gz5{zse5끁P^͊*(ݟf=\h|J6($i+[ Лg?`FȦMK Blp7G4ƞV"K/`+hʏ+a$p=Fc>cѯp+`@NɧT'>gT|.{%>TQƽ/XEA‸y=.h3Oٝ(-ғ|;P_[YѲkVpyܓf*cU)tO~dhDDHhREkO[,o\06@?@nXC֟ O@9ܢ K(P}mJJ8A%(ߊ~J4,fdBJ\ķ~T|{-t([Gմg?vMgt?n_THPx2qokLj l]jݴZܗj7ÁA. s/{rj% FOFdB&Ά/`}B,/$ԛmjr8:Yg0T@W]՞=(=hWg?׏_gOT?oih.+69׮!*I& v A;ԤBu NR*SBe*: U:ncUœ9WLtzflw7ח3 ߧ++{ŝطV8~oY:9ɡ0c?^wW`>!ZO" ?6\]ۻ'{vyu}n}-iG?H@,Zk|v~v-YH@E\_-.n粖ֳJ֙y{XzظW/z޾E0yUa褥RhɹAfq8|[=A YD!,]4h%x ;7D{@;y~d:M 3K: 4kI*KXhj#a'# h39~d#Ȧ_yr=⎄2 :K#7 G=Mc!Ч AeH!Ȃ})ILnۿf™*-9=:eБqȗCeC.TZm.ӵ;%$Wn2rRs6 E*M&)r!tmB.Z/3)%*6p-v;Ow|L΁Lw2=*0aOQg"5'Y\䧏oysT1Mz~ 4 is뒁 2⦒KU"S2Z[ [dfXR $Vhh@[m4S=(K/*I}ġY^mMH !K]\X%FP9ri x33I" NU "x":+AVgugm_UQB +voW 1dpO6!Pnc3{-ᜭ;Z{\N=J4>JztxP $^G¸R%hhOJj{x"FfI@"-ƃ򖽽􏽝JU=_V".fsFRIωY҃X˵]M#tY2lMHgܜHrGC4b"J @ (LatDHaلSNv,Ic<|՞YsIN:4*&S+&}֘Lpt˷Si$vw[Dq(*0 ~.gMΊ Ę]ի>ɧ K[Rr0]B,4ǒ,^OI|aw36qkߔ9'`q)CCQ6--xXqoQ3}G2ʹ;0)|G0)dYxJ\0>k*ŌzǖF=FU1E`D}~pntw޾Ed PJ\1`RفJ%¸tIdJq,ˆP)6Ԓq'$ R4R3D zIƍ.ݳw6E:pTNNv20tݶR2zDg F2hi!1gYIie>_{z?E={{vDA=~XuH흰$JspX=FbWuai-k`",1X@pϮi %ѰcoߢB5̀78.(h̰Bv+2w}y_0Gp$for L\9a{8LG"yCșgGХJ"Mfn+K[#c {ArKgج<=M81B`֚ KO*&*.Wrsk`iÓ0JN~>{jWkDdX\%]vwho!"՛_D7[>hԗ'H>/=n[qSXp* qXrq盹afWW7nokMo|c+w}Ҷzw_RL4׻HFo4~Hv|yv!x %IZsvE~q,<WsJV774j"]fZ}7v 2#A> ,z\8_Ϯ.OƸ#;?N=i(_ZwoԏQ?fݏ>]fvuYhۮXWA.jBEJ;Atw'{7{S36(ݽO͵ٱ(3QMh34Ml];29 XTkbl18kJV|ksHz7>j7PK.3@*6ĺ5fAg.v8ch+YBzβ5ٚ< X﵄Y%ޱ!=sq )6Z vicdw4>a߱R>Ei(69љ&p5v ź p S=( -d.us1b..5]Im & d!˾QXJʐ&2b0c9E]l[̂RśtɳNWr~VC\*nhi-|3+\s1ɶ~M$I&QכC:xI=K ~"Nbi@b*8NL!L蠈SOqo#hNO(H~| ~٦tv&pu~^ %.zsV^"A२ڦ#b,ă:amU#MP qBʩJ<¶=%q(kg $(slE9{ V>WLgaR_z}@U'~҄O9-l3H$bY\Kd+YHP65KHp ,%sיdmЖ Q>ೕ\mn2D.ȣf>l:dg08#㘘_PӶ,Vt5s+ b􇿠m?}Ԙ C/9`0:[]kb ȸA+ҁ#;^gE%3~z| 8Z)v$Wޓm$Wo!xdF~鑭&ljwUS %RS즨"=լki8IځPɴJAMy!n 6?}1* ĄKJoiJIyϪ4r0oM@}x4*qU_a-?5gSR٧O:i'J[X";JN>ꠄiyʺ_$B~uɡR8S1qKI.چi2QPA;6:GY̱yne%l\UN-ˬط+`[iw%V߇vYsA-c`;6))=i0gv-.GMY=R r atnxcj{ .IPT(ԣ-;6TBJI= y))E'XD R yc'QLyqZ h-iqIy.ŝ@SY 4Gu盨U 0ٿ;q#B vlf ӷ a|cm Ef[-(cvEDň&%׃q8a ۛE\aJJI@JHZJ PjP18&8j%jFټ824 K( ,b4L,XS4bT)[ItѾ}b395 BQEi*MR"8CU Rܮ4tF#-s)*_]gskraI\7KB8\bJ Uٛf#R}]$઼y}M2n^v)(kӜL)4O2TPrQآ&ݶט\[;#B0M8SD/` LzVRA&iL*iPb!@MRPB~)cD45u+Ïi!<BJUɤѭwR2bp4@ษh=ӱjQҚ꽨-.EN""H<9;mٲ5 ٧=-`>L$}ڐ#r<ۉWwY4L hBOrfq) 3 .nBGkIZJiՕDiH#$9?/ 9\+檹&HbM7w}Cb8W:`HY]Cj1O~xHwǚ–芾vzl_O?Mf`j'/l9sa*}sdeSmضz1pyY-6Čfх?Q+Ow.`'8U G,/E&ghm`t#q>IK-bxmu~k;0[S+1mM|1`&ՙm˃F)~!29iKvOym;z8_EFПqN5vm ]X[ǯRj2^~|7_} \ʨ'J=UdYg[Y,8D鉚$c`oC6&:,>U=)4`8=!7VyDž3 v|$3U`xjBmt(e,4!0dSyX‘u轸\[!X~[@gnNreUL9hvHUR6KU΄L&m+_ _jqlH{m7u5twgI:l6ʎӪTKA!Ư[ ȒJ'0htWH9 U=q?_ٌQ3̶5Q[ 
U{kt6S݁VPBt%'R\bY#9O4WZB)AH36SH>};&ƢyZF9?'JXQfNTˌGB1\%2NvKOњNBT/!%f$iFֱ'D2 TeLX8-1k'wH0p;ʕ Eش'3IDa (($p H,0.-䤔b\`[NRGmplaX%f~3V.5=[qq<*84?Y&'W9CN TcDo_^8􌚩c΃ㅢJK<3Ŵ~>PrfB>*FcqTi0B (Q@,KTP"KB8pq.)$^]"*}r$ilpJjfyS$Xq͊bFb/sXSf„aڗk"wU0kp{ }E[aV:SuF8s5I8C zޒ򄤳@u"ϒ q@)1gi? Bkע/"uޙL R\4_/8}6ef7|B~4Qȏ& D!?թ"Ck OLc 7u(Df+Lqh4$ooFy1c,j5jчLk8,4M)/9KwI'K1H+(LhttD|Ô)Q(RC!p:DV=Pl!)OCͤG۬P4}cec&y`3Vc:$i<8K42ez[ݗ~i8wְaA8qX 20]6{ 68X`{9deWa[A Xkc IoA2Y4d:p,bf#ƕP3a`ȑN^B!EرyDžG`28.Cf˭eA#aPKuVPVALm|T;/P1ݡ"zڞ>TnE˱ZBY9rzKdXx#)/M!rWk!L7.<9Pr7Q^(_=E-n٧}U;W]!0a "΁ ͹ jrbhlG _ߦfe*VB TLώgWL~Cj+?e_Mo)/ B) C%Bie}5TW ׍؆S=v>h4!SF!r_IlÕ솺a\W:(Ӓzz=kSl. `W$<9~#ճ: "՞+Y]Kb\ l?!'b?ŵ*~ ԗV.L{[T- Mˀ[ % ꤂VhZeG-`6m'P9g(m=QfZV?pb/)fXG"{_k#Gj{3cPKbBQgH~A~m3QscFHD+NY/jZƟQMLM1m3_S&(9f}J>~h)ꤸirCYTUrնF=~:Z!fpWP>žP!&=-|qaƪW}^pPw5|Vߛ»GHO(]cU c{S4CE (';(aĮX*v)E1ߋcgQ^ljHk4{>SZ#8C} aiĵ(yIًV%/8 /wQʮ(5EWÖt׈@qsH͐bE.wf=wQʻCW}nn6s~370s~37(˕- OLonMUɸW=x`8C mٌ鶢^!D$8NT*"QE VuN/ͥUy9nY.d{ZIuIk3x3߳K5vMc#6ġWx+E%5XQtL{DQw9uY6kmIBe~1PFd#Ү̿o$[,YI"b.ef9Ȉq% CrhTeр2N0[!Cֿ_ǹem>RSodV؅͋s.ӷ~gD?qSۉ+ҝO>87_&l&Ky>M ՏmQ\_CG7Kq}kiqMb&4)} ;f%$#>䞠 sȤd6Bnm900Ae>\=PV {MB4xS#W]U*t)fګ O^ w{՚.y;#|NL$!M;Lf*%qZkLQ]cbK$j?O})+$Bt^,_W$:ՅJҾEk!+7CivEЌV.24f %SՇ5o:Cm|J>7e]Yy[\>q3&m|T-c/Aɽ(z"B(Zb"g  R#_`߹9Gs^v.0q6׿*0f I1Jz J%Hku/{F`Kz~KE%(et&=* ս3NUÞn]h(;UaBЫ d+]{PW3ܛCAu]RXs9.a@~d`_VXqQo.y\V$s X_/,1|f،cSp.:86{gdlOZ%D)!/4%ApG@3LcwӪ1+Kj'eID"6cD~_KldKyy_I+$kEiidWRBVR;õ@# $؄aۛ}X"2KYd6⌔W֒!NA.Oh`&*ecj6ހdK"0ʧm~ RL p,&cC3 0H~^L)Jy f X -yVS5@eP̉vXRXF2fi݀$ ώL\ 2;MiY3!xX<7$v9׃?1֖:)n=vH#DŬ>џ@Oԕ ؋1OP_.^Dq;vcs@-#D. TIOeǡl8r\!-hf$PV ˑLCM!3m3 wPj0i案ue1f DIJFP+oC~ݟR(J^o`8.us)KL9`6dt[]ؚ=,aj7y =Vb1#,[z6-n^!yo3QFh&fm8wp"E7;!Cw/Lb)f3guYOHĤD; gSa1hOѧɢߏny*'./~s-b/nṲ@Ifng(? _n9<[ϞF Hh'?]2D\O=g6 t}[q7,Cq 9֜pZKc3xe3);ʖݱ`ڽ<_Ceņ8vTڂ3aCz\3l]ZJ@.Ox Wp5qsECtk"hӦoIGyOuYVypfoϔ4}q_W_LtvtR|Kf"񈍒O_&NGQI0Zi1uQ[}tYӇϓY~]}~".UZQN"\\  M3Hgc'%Π6ןsMi "kٰQمs)27^A]|8%yr-W"q\H@ӕ-a22>\X猁+9jkWl#W+9"2gWjXMWkH1愄T! LBN[7̞>C>CZ.YB 0jܠK%켚1"NeRYuJZ3+# va]KbJDkiY,Υ43erjLg\'Z;WO훷ԯB4XDe,H1o!Dg.՘3lV"cY[߼%IekJl۶;j mS+_Yns!>,,հ!cszq0tSl{ ̔wʌo0t9}r+}c׀QM(vr?:+G1;1宿Cb CsEɫυ:-RoÚ{JC*' tX |FDk;WE1ΏN6,gl' rHY#Jq@abi`3 B{* gK栥˺q@YƨC: 秞u/|؎ -HKH{JDZ\SV i.EzU N!np ]iسvz8}9=G<hSkOI`Qpccl'ưgsN˚&1Ց+b[.[|O:>~_ {|t7w[! 8B(9Jv絴 dOZ"!A賁N<^x8F֮dCW? I{kA[u.؉"Q>Aݱ}y6/c-Jp"J˾{N͊RG@|l~ʅ7J&vs]˒ǂ{eWzMH)+ u;qE7 K0Mf눈8Ԭ<w=0@: 7?@sJmAlB.ӷ~5Xʦ/TSo.p, P9lz7/x7~#?r*~2s$B nY*p׫7|H:/~)16{=d6_*Ae 2Q.c,BuƕAc1g_Кkizu[?_5c$ 7s/1ɋ5ǪO/Eoi隶g\BXXCV؃|^*o ðdX$ֳR-"1F@`Vu^]? 
YC3qhf1,f`mB2S +O:3; .jnf*"-CDF>;Pu׏[n˩D#*^M4&&o"@)k RFZ E3,32°qoיU`#{ rp!fW +"&M"vo#'r8# G쥮E2@,CzY-@!sN Āv!VX tZ TG LV!?jI~Q8&ehHCRqȁ X2"*NLS5QA,yu)]1ԫ^/&~u 44fe?SCAZ(挢w( IY{jXC{fVK6d1%NY-(UpmZʑ%ZKm҅0BxǝLhOc"G*P(U]ܙ@f$Bɱvxo~ϳ/wGc^OQg"qy(wO{*D޵q$2A =N,lN$j,Eiyq.PpDM EEJD=_uW׭V%OL^6ae*0:C ;ᯆImd  )ѾxO(DуV>>:V}II]uAOYY>T*&$E܅*C0K+zzJXXԬ7y:؋FH5k;WRkXDB6h(53MS;]/3:~ W_(99̼b5903֓)^/R_B).6-0\o>1.=>O.xl@tmQ6r<^ṡ.&p$+׳ E{#Yyo^d'K^(4#womf'zG+C<li=?QVP$Z!l^#yY;&K&}daPck?]Y'(uS.~d)SOğon*YBp%N|-4Iα@n*?555;"e,x[*Uqq9'@i%A0 ZjvH@d&Tŀi1Pt/fTu`FA9 `K,ÑI<,bPJPS"H\hNV#eN)#R#x)SZV`1RkV w Rf3%>] 9]ycZyͼoZ[Sةo8jY)H%RZhRʠHQy5߳7eO7""%|--#ځ hf(CBcAF)!@rh%#w U>νsv҂j>ε\k>ݭՂH!'YYk>cݘ#{FEjd8wTox 3WX _\\ ] /Vjz^"4w.='%Aa,hpڍ8`Tu._ zuEQ;\/=>!%;ϗ,ϓp8OMEW+5+w_Nft |8(-cZx39Tj4ҷ#|> zI&r7Ӱ2[.!DDhy{_'/qӧ7"*a1&7}GZ\\} |$NՍmUe(qHc<BH>dP\ ]I0^/$d*U{ ZqP!VYcJ#&%f+9vY9UU9oUDitA_䨌k14̖5WB8?'*&7V˫_?tYrs$X_LCq_/gx1_~ 9D7~V./R]k?2b'5'ATqyWWV ;woUrz7㗟 ٵ`M i%SΖXRApUi-06"0[+ C.6oqsi1fG' j3%*h,~53X]f[/?7TtT^ Y݃=U,pGҍJLW&*f81z ^ N-ݥ)6j/5Mh4;ᄪ,LJp8G˽v\; XmƤCc" R0ÜKCXxnj]Nvͮ|޾|xQZ`lX&b8x36g^`)3#na6|'%9ʞ3yut.U4)GttUh9% y"33 sX"Ėxў"amfdjJ18X); KMV]_:`L2Lx=T>xHu2 #抭ga-`MhU`%.'IרX꽭MyJ69O&)伞lRj$pjJ`O "z9/wF 1›0>Pve4K_@nMWׯ}}+@(a&0,̫M=do򃗱`H+GJ@)$NؒhJZd$(-x1ZD9XTsE-s ]KF@3DhF),ﷸėޚ 1=#9L`47Y|t` Pu0EBp`ԵBOZC6]ûLw~x73%[h{ACi[E#]rk ,-(v$ `$9 >` #['1 M4Ydj"T8|k׉H1ے)zUdT 8F5vdqN `X(1# KQ0P 3`ZfGrP_+E0Pd;;VꡔȾ}4t9 \2^[z {6O,)SavJ&G | Q =6jtOwVKj+m$f 7(asXK2(geCrQ`KAKGz+O$TO& ,$M A.Kd!KpưxT&rj`niMSA;~%ȮV,ܒ&d9ߪǽhĢ?XN"ADr͘|m[q1wPJZ!<oA@T/aZup6ux_WɤI62ŇhrThߍf'(f֪9zG1v2)U'ƤթK7M7/~>ȎuCuZR*B5o× ~KK$o @Lߥ=gLQ5q:T4iWOӖGvE!JHZWqT!e$k# ;X( lzg!񛐫%<9WhTh]ږ2PJ&4.5s8D)Hg=0nOTjrjrU.x[X>x_Bn9 oyICOG~~Q`nѕY=]Er|Β3Pޑwoo%No@M}] %j_#ӛlں˾#BsPskL34v6x'RM/V0Äcj筥08 "ְ"Zt y 3^#@@:z]A0* ]t.z]vRIbB;q꽎aBݻ8 Pv$ybJ*G<+%DI(^NAh{s \gAB U8Y[s!ñz@:ȇ"6T$a8!xs2@g^YM5:rtPPxTBGcjr@i&0&[ݖKG^Ӱn-tp0֔TUlB,4j `K ߚT,`nՠ0ygjm(^\)7f&06 !WDT0QPH.Z *WskcPWF(⭭mo2TLAKְ8`|]RA^lo/$P0WN蒬l#}r{..>Q6{$X+F.9b)0)te,306!o,[\hKj-%΃~f8P 578<aԊsi7>'BI}!XI5blc>!0DX2(r> Ud@nxBcξ 'pVЮM`.=4dԅ~m@/@%8u)A%#$.T(6DU?@h@Pf†@8h4 ۲Ä!x6B!r;QHёdc`e$sk<7̘-CUr`h@_6=*HVEFjp?N8'B2J 5 Έs@Y1(ku0"CGegٺtBUebm4U}˛o52Xi)V>I,1R>1LBLE Z*{$$Y[9"lo:N A^Χ9STdr:C\&&-vx~ PVU'pCݩv @SDFmcj\w9 £xo)fXxCS#̎C-AprNX\JUsUWE AO팇X5&)SP|YJ\Xʙ {k,儨h;e6bkDFLq-U0mon2Q×d|ҟ^xo27p?Oyp|Ib)4Nb'#$!ydj_[Ń5_mfXVi>ߐjK &k.Y 8]1__⢋XdU'n::U00Xt^~-d>NtqX+/~qG!wԛo/ ;rS‚ l÷Š[=ٛ>ӡ!Wx$-އbTq*BQ^(޲ I᪲$n6_^~^,Zߤkn5!0qyU+T oRT(żTP WSM uTRnWpлZ~rCbޢh#<պg#lu8Qe"J5iT7?޾znfծLEZA;v6$A%3_+ AA$ qwM%ΈB\I3畷Ts"3[W=}f:{jCAI0 qO.0 ,wOxJX{Îx0b6' ֕6ȅχ閗Ry`wL 3Iس[K 3 s2R{RX.6M`W@#x_E5. t(ũ'`!B\UU<#x'0ّJhYQVZAR*JK\A[5 LnjD6BCUa qa6ɥu{Z'?pXJ %Jc%UN3> v_m7SLNjaNKKYZIEsmzܦ ̞-/.#2oE_e|eזfm9Z7O}/21w8Mo3骇fF/^}Šs}qJQ.iht ]si@_0Ș#W[H3\4f1߮Μw"³ =b6l58u0~PmnrbT1ISgcǰ 7ýjтF>ѳ/<R[Z}5 Ф)%u$^.4>Hb%ԦTΐH,"p#6h@C)9%yқ98]'m$yʿyj,:J ( {ֹqnW{Qr\'M+B%I:ܝ3\ A"@؞;K23~{*A@e싱u?OVb.$-B03ET->vIaۓs!Mb66KpQC#_|sRz-NzlGmܰaYhH >ZΦf5CNɁn!ϖi޳Ϭ}kڽ_9&<8#nv P}_<7b7,&u󩹄cV&9à8PLr>)>XB5mO]I'vJf$ؿmQU2g~{L+D|U@B'3k+QXev.6Id]'rf`3w'GTV]M݉%vb8k[Mi,VX c7̿wW+`ZpO:lXZjK,jR(a) Ey&ĦҲnwPȎ~tOn-u$j(5,9Q8E_W <_qRwF_}Ϗ&dBi&@Ŝ)bQN')ɽcHq y"K6 @M%뗒XJBÞBO !ѷ9e(mԊ>' sH E~oƭ!2!;}z#6FhreXm}BR'.ja(05N>o2/ߗ8F ԣ.7-ψ!"-eObhq <^\Gc{-O^X*KFu½(ro_j&BY~* wk^M=ݙ\eZ=pumcus7a#:U3..˦UNf?2 no! 
?;_͂sm_Kǚp*HKsoZQ--P5u犹3]n-%ʯd!OD;T+tm\v> O&fm޸@3gp[dcByL yjyMc^Zx+QqzKgipY^w3\$gv%O>oh\R{|k*aL#\}U\:(QhFJgLK)ҧd7ac8W,kY 45xۇ0zqn|d2=w7YY1 xJ9ȃts@s)࿄yHatؾ^Zx9sҽKf hsӞ|Micx19'RL5u==|4*J*Ap5v>AöAD4[$Zk[_-C.m #%Y` 4 S;ܔt{حdQ2%؍Pzv(3P<2UE刋,J#?^YL;M&!K[z8(tmG韷ӱ %gfb8s0 }8pkp7U߳ߔ>(-nSFA.# 7|0L<by{Ҥ]SN=SG#|>6d3TŶdcZ i ioBW:b[Iʕ1GE(wXƼkDmY":]'Zixc"2Ikg3.nu`!bc'y7~g_XUqe`u%V{Kd`,{U5Pu$^BUWIU#+FЅuW#p&~HKe:.6}HP;ղ*t^ɦuĈ"dS-j#v; !afCSqt_=b`jgs<ۛ7ṗ~*n`JjYjMdPyd#Y ɪ7-j'qZ8F$CTH(ĮYiX =_u-xdw NVv AQgKSAԖ- Vj+8G!tt;BH-gasՒ e5=Xyj#]ʰ_/UafNhOT;W abM>h5rUB*bL[!]#|4\3qbFRԺ7_^ N 剨92$A^3Vjg F+`{oW;|'P\wƒv,K 0sf!TA,lCUqGd *mssnxEsts;稢ھnqCfnl|rkqT8W]z}{o 7vYpur)$rT3pҔ· 2PB|C1&嶩c`\cxvOP(AֳX끦U&#'@oם12#8Nη| Ɂۨo yJHVqK)G=@5X@Չ mՀX) *ѹHq:"p'$t'jFE-ڐmܮ'Zx n<ɄIb9%^~cz!B#fZZߪ-,]*wXNh]ݾ{5oO.$:;k(ޔQg3w>*uck< ZI,T&TRnXT.@} Vq%/wFMD5ڱE3tþ٬̛c'ڵKW#S9Fqa5;#7cQ11?h<84p1=] |e HWK;E n [X {.t 7\_GWEvdk}HZ hq-nZ>GRU1fNrƀlBX+Ge\CMj/I✤Is$9*c)-U?&TEA],M*%0 R&w2F1%e3VeG)`P<7՗.-J+>aUMOnBuxRp'D:bYbBhM 3NhG:3Ȳ`lO$`P* kh}`m̐mw"!CC>z:e(?߫x%{c|,L)}3#uUK lZ..쁘2c !!D__O_*@iEQ#yӌmbu Bԭ1'yuo.8u Ä.8tӢu5#*ο~jFqymwe*9IJaڕh&)NS5Y7xˉFdh;+KT6.%CAOuC1BJ\յ;B3Hqy ؎_%NŌSj.dg^p\*N6s /Ngݧ_ktҊc|Ӫ(w(#fWsC S{*wa:\|R0 ~Q'&đj1NI+364iC생뮲M tG]\~rFqyI$KbYFND%@PT'%CQ<(3bx OK$$X-<51 USl0CX;Ztr ܖ{aV@Wn61֝e(r-] 197;ZwݧaGzh#'_ )lEƂTjA5ooVM+' }1GGBCrV xca*oo~\'1?fMyv؛O=JS0B/gx0 57AFFK^w$-Uw{T/|4z:$BS'c,|ژw[D=nȹ ?<vu J \jjoϙ{2l?4φ)KƃsO-蛻oQGTk$l8|fWpE? vO6`!0hlSp*3s;8JՂ1S\lrF Z! S]yEߚ%PEs-&$&'zl_hǘLu9nGs:isǕp 34sD~=J%e<(*4,`2_4]ɉ-4n9P*Q&Q;xr8u{ĻI6We1f---[dEg `kKTUg1|N[Fg fGQ. ܌~KQXWqK1sF1==wF"sg4XgtFFKh* /Bt >vtRlaꌙ7cyJ593MJ->?:O&4#l:W/UIѹg)l6|p;; }Tk3p1Cř*~5|+x0`Dsq5Ԕ7Nᤆ㊬9RlkHRYNwhS(g(͒?n=9E%LRqI FK )o9iY$ )Xr%rtˆq4wJ(%8'T*wmmzY]?󰀃89y ڣ[Dʎ"T)iH͞!uXn]]% 0Ol:a{mhN)wR:KPD %^z*m^0IDVKoBt!ğ2P`8en>UXjuLkuZ!;|v%]/H>?6?+=gϝ  gh9̔sā'dO$h ^aOL0́< \8G÷f8ZW+nd;{uǦ&ۡBZiDR`c^00ʵQ1f plY|(=vnI|3 jt G1v,u9{)kFV!Q&z\"*T%m_5}VP TM*UP`.T1 <@6"'/rEr=_փM$td Ɩ6!Y c/r n% E]y/9"Wڗ\|3Hre4RyaHR{F4 F( Q– ɑ,rnO"f)O@{X+‘4rAx%U/iVhJ(P) xdq%ADMEs={0zeN{,)H#FbD Dy@GAT[CF&b7eilK0e0tQ*`T;<#Pfw10`~Dk!ZA 3ņ#Ԉ` loN c"sIfAI/m3W|? Js9܌ ~6s69 qΑ=sMNP 6K8)(fO$MzgY4` %KY[ qI![)qC&LMrs:e NL(NEa+&{de?й!gJbmD׾oۈt"&MW{]u5zrb/cʭSOP$|E|!Ht<[\OeB{3NrI6:i ՃYԼ!ل{J^VZe >aE %}le5zrb/czI1xoCd49 Q|p!>쌧TC5] /~jQufŻ+\9ftbBA7wW9E:xnPVcaaPGNtؗsYyƃ3Gw\%:A5j\*c_g9/bde c@B8[1FONe 1S/1r9 xpyR];8+%Z /bR`b(|Sz^k}P7ՕՕ7'tNWz@S6Ň|u{qʼMtyYvC{q\>e>·<'ſsh@7rq2 4Lfk6k ( MḗәyW,$<9 E& y& bIwwoM¬Mƒ>ǨA޾EǸI7H4="$CWF?"$R6b:zR}C;Qtd3ѓ&8EɦjB2(F3:ϤTRPzDp& N-9Fϳ`],8~>no܊%54ӌo-1|7dXr3!&2:Ͼb*JKAj|ylUk= Wbv& }ap[i!Q#uUSŲK1ź[߱&Sʀ24Z9Q7BFձQp])4a7JR9w QX_/ ~c(~o!Tu?pS`m?i+-$⃾-Kd.Ds a @G'We?rֆjv`+7w^9嬅bO!8wK :NnR{;%P//թxiuZ@b@JKF];%{.w#",f x$ru'ZnSUf?ŻٍMBU# zz/sQ`R4n:P^>a\M|W N7%o< 8W^4DE=B‡\\bn_DDh*I3wk~ۢ; S$y=zL30%b|64k/c|%J İ }ΕN4 sk*ez78ݚ}&g+#]kt['6L L G%~\e ?X):A'kD=m:tCkw"nVcP8R*(%?-\W"U[YB~+ObIh` z?u@ دh".; ]\;;Ye4(*wm-ᝈ\@KÝCry GC<1"ZscbQBP8Yx Z]Q1p:2gI1Ӕ= EAfLV}{} 'X}lfOP~jbY{ډ`TJ8٥V''f2* ׁJqV֫$ PxR] >' aNOYxRƹ+O0{%OsA 1=1%@-Ǫ$YX?F]TcZC1fŕgVw#&p\ i*Re.7Y I\tXN.LD쪬V(жU?AK+dcJz/]60~F+0wH?'%T\؇SL(g,\FC*K?i2}XCkps7'H~c`$o~˺R(%?/F1<ԋ_<uOc4ςZXG,?yK 1"8{;8wʲ{'qt]_ XDW :|9+4o h, ګ%2ƕ USϘ |ĀR&79/1vgi?E`pcpX[!¡yGGoQwq4Qm? 1n=LS6Đ K~Z7cv<*̱NbF9b!7XbJ)BLؗ J@+,l %\hNJ%QK7H-\`"1SaiІyDR'Uv~!*֖P@Ԡ aϓC gAJ\{3V=Y@b,Ğldž. fؚU/%."]gM(/hi>D))c(pi\#S9 ( <[e XAѩh."m9 --Гf8*mwFb}vT6鬰pE 0l]&"t{cqx[I._|=aՊĊ|H,3`Ue/"Lp(f`˚G&G|1< d*}cEɊ|jԱ*XY%)ڎy]1*O&s>c%[=R'Kviw딈L&0:!Ԉ$'ʢH <ÙO] [L "ov$|pLAhQy3?qP`MC22D H%)Q؞.5L@# z`1XB0NpHOEU'BݕlC$s;G+s[L00. 
LTMӑ]eUvd")L-  1R1MՖ:CG[αy91ˆ@píGaF |m:-.жkҭƏYKV#!h4m64n!ޖ\I *A֢L\bkD*@Ȳ NŌ⨛RYpJaks-˚-hdg }/]+;s:vQCq"njUI3H&UJŅB^gV_\ GrE 4}t";o5YT J7J *a 옳('4)rfSv(.ŃB:{s *.'$tΊ!v" ig;!X{ $A.&-tSUU0(8Jzmvb"[M 2rZӳ± k7cE\ ~ 2a -]| RqyM @$j acaO, J)sLh<R?(MsTLN"4B$ˀ!BbQlI3[f6ck?wV|Fݵ5:m-h+h䴣zk  {E,#i HjxQA*5ȮtՋ,j,/fAWA1u)ܰx]7,'T9ǽxiƷ ϷP[OJDe^6b2*p{un .5β1 ]hJ՗꼎&TRIBWA:<""Bh " MJz?stP gMORiG a0Enrw-,$@GFhLy.[NK]GDj5[J)G^{}(H IU !.\N+F8O`2JjxYcJ!#Uy'ȺᓬdxF6D$/q5 A.X^`o'^HC;@q~G7({g˩]H汌DǬ"f/V3W }X]3iZH5_ (Kŋ2Q/#|]q4C?6Fg.sg /xTt%U]Q]ъq74c &'_%YkXplmCpufD";lD]N}7n<t{7S "^G –fW9#9e'bQڣn?J6@VZ'[Sɰ̉iF+k~jWt!ndZۿݟa-ovZGZse-ǣWO]JftlSZvȸ OY Q&I&Y95%Qf~]TzK 39Ms(kPNhFY.3ne@0&vclܫ7k"͝>}CGRu}*Š-kiVe#5N,ܪzlbHw>Y!77jӤku?>t1OF/)x_'7ލ>&ߏ>3L!fKCc'fO tǑ}2v4: N@v-dv^ \1#0J2ı;]Q=}x4!=o\k !9֩@!$Et XaR "(%Z?[Sr`vuVsR.b}nr5FO\7U#]H=&v>/~Z+|4nqu\ W XI,hoð`Nv^ۧ[Kҟ˹؎s,Z oS,E,5 e@IJ@wL~ǂrIǭ+0(K7#{8|1IlnrI秲*׮ʵ+rʪ\˪,`OwҔ"#N4p.8d9fR Bs³ ц~qjv:+vl3ح@W{$Z̋1V#FY\|$%o8V jm?i NI bЏ@$P]m'ԆјJ.t)24 0dn$3k68kJ>]Oo`ɭm8ݣh+O6t]\{8'۱-dPv.O\ߚbN[zV^OJl68qT*Y,pJQm A!9 JVfF+0J^`< @JٹܚR1[io- JcIFȭJt(!@ZZƋ+PYRL{=1EqdDnD#O&r:G|ސ3%jݗu1꾾5D%v38֗>_rRʁS17bVbz {QA#s4{㪹IѴ c/1sB-[>Mn%?ˋc݇=~C0<$z6*.GӹOxd&ı'HHYwP,<)o/[,*벨 c` H+ @b΁i \0ȻKhmpGwH(0 7衖4짖GҒ<ݧ@1V@)ʳwӝ#\M:>Y^ˋ:][I1J:cJ#GB{ZIpm§\c\nzSi0(9_]\>x^.t^9ї˱[L1aڥZzZkb<\Bc"&! l1-]S?HrkR}Mש0Za)g8+'iBR V mɴBjsx LIV8sJ j;tgn8< @ z?x)Kp^#lNž݆u^0 eu]7Ep`}W["/5Ϩ mY7}2NEzmCbL]SS JO5yĒL+KKңN/3 t1vH=מs/ Wg:(xR޳]<ȶß TY׫!Cg7({IR HÀҦa>^>ռ/ҼM.ҼM4oɩ& Q(hˈPnhFe&hHL0N kN5HE^ UDwb:v-O5I< Apu.@G%J^f P"HyA)K֪(yB<{_=*fHUTF>U RoU9;cY:eD,z~=[$Ǖugg }'\&7x1>^>a'47eo8~|s2Ig7 pw~zf3RgTKe̗'_iddrnongiMe ӠTcr+ V'w六{sIG)VIsߖ5/$I tz֬2 Dct3-8Kh`Z1}ZgfYLΆSrZ0%A;ֺ2v.txȼi`4Gw"p9T݀N4Pft4q(W_ &tB~&( jѮqlbDHhV|97T0Ʉ1RTQ">[u5j?TUx+}<e{CYN%4U#\Q6B4^,hˊwqd?>& EHmbZ1#Vz]W"zQF=?Kd"~=27V|". *Z G~]5.-mf&|3~fm8k3!+`]J+T9dpM (AW\rkT|C+! @`S׻7*0l $NPncIAmזcYDaNM7wt\ck2oc>onؕ45J!&VŚyϻjf7 ~(;ErEedA+> Ԅѓhy(aB!Fq-w50'#32g #9|2i͵*x۸|ק|#N+#p=qd[vg=g =ߚڳ0T }3N 1ԝcg0a# vىfϪڎh*csTF3Rm:4Ӎ&R9aF%1DtcWRi;s&nCjCE6W}w<EޫO+Ecil ^K"A[!ծ.\o}UOim| }WPZLt_}ׯܫEeWWw.n^u_+OYnzἓ 2EuF05KKe\Rx2[jLKggd,\C?vԎZoq4E mς~*F- h@H޸HR ְ@xiաIu?WL< i! a)c>cȩBQ1ݸw:7FO Sv{_wRLh_2}7w}7wӮ][Bz]zS]NeKj@h]L΂ވhE(Q*E?&1vR }5B 3?n=?xU}QuZIesq$0ƍ( x^NQ.J$|v2פirGjk8h 6:8g簂#e%lb0It"H:7Vn:1NQγ_x >Ø2Qgiq ˈ[u Pr$" U>q?`̳hCGChHh'$5:50\)kԂq:^/f;i2j4"4.@ENȭG_Dn?K"c0!y~\g*idG=tF!h"ZPѠ2 3K@Tdde#H 7 $Dʩ.Y",\[.q;8U'١g-) |NjyNCqw$ A"*#fVTp5㱆1ҲF\O@-ҊqJyDvhPn#FZ0d, c"ݏ^KT6CFBF$Kv0j&g8jq9"6<TT6@$]dnDBzh*F9 NoQpQX)P&EEerLT&ctY[2:6qw1:J|@VMIW+W d>(|C'ր i0qE Dp\RNP rv?BWn4UKlqsux ׏~4]IFPf',M+4:YVb$nE.阢^ !J"C?m~h,z⦩f l r{bOS ]u'sv6 k^J)J&qy<1oWg `:,<.IV? 
SIgwqrvss7뱸xp H?c'Wˏ|3_NLޟ{s|uϽN^>hD=|3GrOH\݅u1'fM Qb1~B_eϸ˜iV׀]MPlE>vh9vh(Zsu^M,FMBfd"u)q b;EqOU4 wBjh)za%|Un^o|ӻ4e?fK\^p\|ZuN{aˌC8JsDB@ѢpA @MENLHg'mn/P5,Ę&\/iBדza!ƌzQinxta>D@D g#h03Rg8,Ҹۋ`ht,(0' ew}b}1}<{"VNy8F/ߗL}/of'7Su/BEGϛ)'"JAy|6ztbF_Ւ̄yc |-`0pn {K F!ȠU-NEVtXlǩ6{ `ṋ7-kHkZdհLh>ӭ $lƽTT6 rN<`fx/g':;ea?S4xl 0s)vNG'iv(U޻'6m wDJEX>w%1PW3Cv:p1Rf{yUs)O?` u ߸7Jn}Q4R@({hٺGG7$ +>M7|c!cRVź0禀rF^Q(2n&bD{wUV|GNu$RQtw ѣkTr>}>6$kr)W%Iқ '[HC̈́kfw6I{B8rQ4xp?H7S\ysu?*WgJ֪;}hzcF tބscl^I: Eڕ&]$h{Ǻ몪߬ꁴ-'DzHEq!nV/GA [q̐{H;_X n'%1a]>UmJjVEsa+0*zICw;u~gf7cKPdVYv<: ;h(()QpK=+= 2wxx!v%=8zaG(6g0) W|O ab򞨘JT}?fpoc2‚&#'Dxh٬R F9vdj/ŘCIu\}w6j2I$4G2e]#Tr"95giX@n0dKt!Jj=7$ O- 8ڶc`ڮΔҊ[.xvP`]I:}wМPP2}}DƢLFϪ-au( Ya+uXRRKXWڶnխHbKmW7 grbuSgS+nd PiM/uSwKP>ꦭei$T#w7~]Qh!Fۖ#I׭gVgu\ܘL(U;}s ^8 Mg9yǫݭG&)}-UYnP X w/+gQ"]z=篿~X%t2iwH "`D:VmM'>ʾΘD(m*)<-  L&a`4Z ]R֗F&z6 hv ꩖W!` 39+ !x 5)2r\>"F 0NnsQz]mq!fϽxsd(bC^Ӟblp`@4 \K L!e d,]jY#b@LāI~5y32πw_>R/gq &Ao9ީ(yxnO#\|/uB,h-/ŝo_H]!RS>j@WPe.º.pAqCXl#"13ۯOJ8vtr}tV@ Aխ\CUM?ھ'@NQ}0Ӵ@QԦط]FQ rev>TkGnB 4[>N Y !N-x*3#<<crx㚃Rr#{$6d,*gBY:mIG[;)I^%\@3p߭3}j|<Ѻ7\<.5ųS]j^BH+.p Fώ8JHu#|7rhhQTcta[OUhs `n_O5X9T6jjT=\;fEHSmzITViG3m. x/ʪ6Gƴ'On?vzzvav1]E&f==M`'6GȹZG|x"D9vUP"KZszIKq`>;~s5n݀=ꔘ]6YXuby7$>%*\oFfʧF!9ON' yi 'wy! l9ɐcKJ@?1%tI)9fQ ; v&[t̹xʡ4匇d(&epK?0qr9-FUbeeЁsƨmy$XN;Gy)'ETuDAȄ/scb[*~_O[U˼:*ŷZ`1g}pB<^*Dt&Oޔaw{ƦF]&ЬTs79%$tHw;p2 }C1m MFԍ>61Z,귕zoZ$G{ի{Qdq4mq a)J8wK%U(>Yj)y xړ3_Ɣ+OV  ԗ|U"~'}++& %9Ao;7M}?n^TڵOV\o9ec]vsl[#J?ܺ^g|آlٹڢ޼𦕠˛7Xa[lˬ?=6zI/1@Bx,!O:"i"Fˑ=_V| y16j汿+Q+-7'8O\}%o{´v($ke<*< S?c ̔QFTj&rp&ṡ5iKnT5/ F$]ƔSk⠪ksK$59uDcJe~#Re)[qj6Q6fQLX^o*sy |F^e%ώr?)nIB%J4[q.WT4>/fKO4g4tKz9Dzqܸu XST `47k9bz̰4\JHG_'X+-ZJ-- ؔ_Qʄ,+k OB icӌZr"U@ǻt|z;ft:}U_ބ 1d Ŕ$Uz J6]y#%\+^R.t{rF>}ЯWǸ>Po cAF3l.%?pBҘ[9vB֓0&Rl׾˜59<Ǫ!Zw.Slş0>|ќՋe WsXFr<1?5xRaHeJ J%dI#cLYO ^,ƺ,R*ܺy?^NqL:n i}O߽KW!$e7L (NZai S0qJTӗ ӈDĂP!J҉!ip,cBE$yiF =Տ7y@ J'"%ƫEcMz:*>nӷv~۷[3 O/ >o:9)+Y+M N!8}qx~hZb-!ȹvElJޱ|3l}fV^̀9"l:J`صwTh:  ]:x&=~EscDY"u6}7*OO۝ z玈fD Gqykp}t"5|"R*4DHqN) T%J J+[br}gS L!4TI?ÜwefD *,MuvA"*cĎug9bh=XH*%p~v3b_=w96yn֜4)ȑuF T XEHUcYd47J ǽ*D ̤_"q"eqZ}![r쥖I3 )t 3jO>`vkv%Op?wml#`shӈ|qN0uٳ\P?k@%Sr?xx^L)AE~Pw_q>svJ7yW% syxjjM*ܺd|ṝw/2/}l$2p %:#vY1org%zgGV%YlĤ|2 . [H I0`1e k_ӄGq =y)NñDoܥH^1اU$*E"dNfF @t5!BlC<}~-QOkƥ //(=RDs~uT[@$ d+ڑ9__O }\f-;XOON"x҉"׊]lwvVXy^cyG OZaFsup!WPSDzP1zϤ ڇtC97uF}SBv=i53 3 t}e/d6{;zvfUI3&9ӫRO|u.V2 Sbdx 4 *DU$a!l᫅X'H74qG;CK:P\iװAm%bQsQrٕszTz.)-= ؑtO$]ȓ_PRKH*^DbT`h,K%:1@JXc\Rp~orrI ӃrzI{ؑy_Nڹu C+8#B#逍M)~`-c߰ sΦE3#jkuQx[|}|䮝BH.v0pU0]\VL9(z:1c4MM Gy!YW덭ώc DcaǢO<ب/k{e U y^Fyiԋ,OPyvI @g1^x:WOb`9fh)"d`ԭtF;@mp yS,|=4@1Fmy{頮9չ` [jXJ{9&_&,""&㊷ØO,d[b bY`Ibw`7Yϰ3m _CQs8Z7>qϟs=. 
A (Z[܋pj ¼tV+@EV]iV0M+u~R{n4+|5 .QII7 -^tg܀$|2,Yc4{F96-9_dAKXÊ'{K!hSDzOKP*ilgΎNeX[xwFD',a:ƪJ|sŶc;#^ga^XR2@ױqo>X5kX"{;/\Ј5zkxÙG{ s?MyFnYump~(ϴ1qv~E⮑ҵܣ?*CcI&xpAF<qZf=%9;:v)q"q?fNL> F"i N& H U>]a8IQ5}i*C#($%"d"pyz;yXY̲Itt'u_vu0&`{ށLm%k~due /2[]MfI헟è`Ԗ1,H8#!V gdk_U2_|uBey/H?` C$&}ݒ~1"SF)08leXd5ZG1JxH C&SVEXݗƚ]X0IZ(0+5>]ftxddoѿk_CƓRiU*&2$LuZ:ň@ީx FswtrA^yң;>viN XֵXJԈ ^N`o sJ #tt 4Gwӻʄշy{[b"S`]D{: BUJ>ڵo~3K?.Ao;Xޚxsk=mGv݇{39^QFƛɇ,{TO6Oi_8ONkp G̉!:kI-cW?p$6(=*lFu(&Mh9ԇ i$ͧG9TG)R`1Qxzu͋JV|6"~qa}IoD$J?{Tqnno7>L\VybM$ݔqpƁ& aF"CAB O/o|+{Żi~gۻ΃koͯpg+ V} 1ZI d>^O/'լ&r9[!+ ѪX1fz<( iV%},:l/WډS.4>y.D8]&S-;I絍@6oFCL"['V`\P/NdBs;gvqֲw3ɘP]Yoc_u󾒏S?;#۾ފ z ;|#8QÊVi/"֗D[S2H4@?^);ŪkXJH޵|cHm`=oM\a0&1I2:) t0'DqÌPQJh,")i"V;>zu'3Ĺ{x 6%8$I~Ҧ;T湽NwNaIޕ5q$鿂;nE{3r ꔰ"jA@C"qtWUgfe~U* qRC\qxFmTڂ+w"@r0ֳ-΋&ey!\+\S% 9hq`Z`nyo { FmWRK D^W b2jbʆ' K[N&tu41&e#H4'*c+2hʜIp&4UNۣ}':_Eٿᚺ7"0F=.SoT`, "XZQ~ wPOJ7B=O3`i?XRSS٦.;Ε^3@Pέo+X*խx;kD…% s^`Kvruq@+C:A7 LSJV輽>[ZZ\n;\Jn{=jl!E}wwKs8>OJoj7:68IL#ը,FZ+uHmk&Prk {Hq-7!Lx̯wk ΏϏsXxyqp;o]4cXSMX8K#[AqNd!ưµq#x viq{efS(U"sμP̂%pSЈT`\9 ,L\մFi akʇ=!YH>KTG:Љ</{oU v<'#rAKɵbDĨ3R+SPR|4|4|FBkd(53 o1;CA*PqQ7{{X*n Wɡ޴k\L.l7n} Nt)>z]I^kD-1P1B"1 ) {Ey *FjAo%FJ(A`l,"ȑeL^rũO:M@g\s@EX02,96>@ 1J 8Ir&i|QcY6.  sgQ(౔1eUʢ%a@%DYQ2‡Es,M`Z6e'h8uB2cL*)ZyAr$*܎嘌3bQ ɶ͙O1X(Ha%+t"_.'R`4N:L$+Bc d:-pofM\ 2[!PXUq $N {,t[F #ZEwAI ݷoR5V}i֋zT;zLs`&a4Yq^8n<.+a%>hQo.F0~H'Һ,Һ,۩֭` Fv`%wsSHtB+BvVR-`mhQ#haayH1-7r,7rj|%6cWdTiZXQe'/+M;j$M;o ʱMw\qw\qZ*\xЫDsǢ4 o 'SqmhQ#h8 2j6~)ɣq9{,TgmR*._QmŁP"}EȰXՇSqu*$Y[*3Pq$B\=Յ\ZZZIz'}|{.[-l<-U [YR7(Yǵ-gI\OOk1’via)f11ka䜪gXw^f5 2O)qvJm^hm>O} H͎r}.;Ly kzdMٳF rz덇Ri<6Lvkݔo7|zs.+n; :· |IP;vSazó>/iߟE} kB,r\ *0riZz1 Dݼ}ϗ߾<`1߽\yūח:^7%mvݻ7._NlloSYzש}guo_{ŝua>ܥq=RYsحܚszEE]N$b1K7)Λ>Vߺ>{\uw`Wm VިdӋ/__e[0c:ew>;/w h!.<|w*FKݵ||tsG!أ <ԫu8yA6ٔn _n& _#F+?+a#yKwFȆ6xZ۳n?rJδAcg?yuwx5Vb %6e͚"Y|V`kJȅBZm%fv-35C|l盳^6,'j:!f*2SUwUe^G<br2V@xE!:"VdD5=>M1`8g(L2Q&\`xkpcISl+RMZ ḢB@ /.,qVQy1"xNI}ɵ\1od  N43[ M$̊]HrV<Yh )" ꐋBis` =9lQnE6 LU:9њrb6#t '}--sO`] >N©֩E>ն@.d3 n?;= 9'Mky住Meu{U֑`&:£}U=ocjYx=G U@ J\Y~0E̓\ȐL՚)(zɉ.3&XHTu$ `t20Pw.1&$)u@>k {ϧn?PNT?/[Xm=%{g'wwZe_ ]:` v;~JT'rB%= է|cB) r u Nזà0/l gT--gj6_ayQ*e{R{{qIíK ,&ɐ*z@_@]*Lt4lMWIbW>a),pP fڢ`h/ܦ1\RYFfSJȕ`e( ͽp_s #! 
ӻ WQ4X1 hRL_~u5L =Lo7SHf"D;7Dshoi' VR}c U7VDl Ψȭon}D(!W8AK|:\DVW95|BDbT{=di!1RjTEʤ4X`t59mM++ \MY`_}p+VuH"`">c^#F#*/i=H8|p)qD9)h%#Eq-xi)b5z(Da?11_s)-=L:d튶<9CAٴUN K%V$Rݱ25x?At,z$Py_<Aq uYHYy:${g_#;0{¯r,<&[M.۞~HԽ}5C;}m*>LyTaq: ay׃' QVպqFhGIv/&j b"/uK_u9Ayп~ B696 Zۉ+N~ۅP}A1!"tkK= %ASݵ33xυO!R/\(CNV2Zirq2z&Chz[#c,[!/oV- 6aԁU B@)5j p뿞 65wzښ+jw3>/AXq+) -5 Q%le/ZīQh~At/<{_6U>x ?Xx D=cy:纐H'`*Jv 糯Mit"Wj9>z+;/]2>%x[e`/W-p1ȡt&pf)?z"8ZȳKDʤH4 [G^X8Wq0Sk#PP uTXdRxڢjft!rKqpZSjl tACj CH_T+u y* .IbAD&3D{@O,lиh^o6ʫ㇭'M(J#f@.ũD*cǩvIT2Rd<_rJhh5-LtڄkR@EÄBQsvp)¬ ]?XR*jsE {׃Z4wCvMy)ݬduܔ-ƲC|)*`,`d5#@ܳzSY|zcq`/> 9\ DVMߚo[swŷO0(2anlqS)3X'1äD5mIɑc n2徃]>\\{ 1'pl孁,oݻ[U ^JD#CoD!XǛ`D<|@y(?+PǛP)%4&ѱ:fֳKӘT8 8*pji[݆jqRȻZ`;\@R\aխ * Ϧ p`Ŵ܆\P͔kg:ZLqLQx$g$' 4.oP&S +b$VĂb$R$5i,h{2lpaS{EHt+K]j@ie1x;~:\FMeY:2.;\63{7;7斨ypƽd|oV/Ad8oēAr=ow7zʣ 6,%,^R:YRx,u w)TI҄YbV |0< pJ3s ֘5&M^n"C.=y˃|fu~u>͂t{~-ƹ\8/|;W+Cހ&hq|{EѢTO\O|}zOA` MS!f{Fs>< ppl %l˻{T>W`clK.Pp%d$Q@f,t3-gjKIxG|K%Eu BU)Vge[U#3vǬTBܥ 蘵 @ØaqLSF#eDJMI)T_TBST{p^ i潟#һzfVdfa=F'G*&6~ Iekpa'.qY$ :+/- ©,"ET5k1v;*0D[(},)iHb"f ,4 N`j7T%ZkI1AƜR)($%&&N#nb$1j y1cH7}:Fg2eS/r4toa]lXn4u$F 5J &O[jy%i,JR8a=r`; ǘ3fL$rGIlHBiL3A [VM7r!T/\B|qLU_ 0RW <V^Xwuh/3/T?V=-͊,CPߊWA^g:gkukӪg]ӱ aLo0iF_R/A@%n%E2A8Sȭ .]/ǒ U8ܰj>g̣co~Iϯ(<_^Mfu̺mF_O=>/Uo0 G & QdýA{(Յ77\0xoj5^]?M'`x=ب}hJf4I [A'70{g{y/5d^!N7yo7k{x^fƽn0:h_Lko65"NqRLuN8܂SLj`T3˴eJbTKxB€DZv;*윒uEm>"bR\^|B #WrUc:c*y ~Xyܶ]3rӧ)`p PP1"2F"0b0/$dOm8IS_"˭pi3o lzЋ;! c74]tmݻA8+T]dk^f颐 ).(Z Set?3}x K8iovPq*2 b >jB\^ƈ4X`T˘4:)o<1is8+=al VqxDRwW]p#I.FC*1 G4օ BkuNE풹U3p IE>T]*45[Jqubl;";H۸m13)D:(ʟTCA`݇k+ԦtǬUp(jnY{[@]V1iTEg; B*?JTO VhADB%ǶdEo޾=@!د#r a6F~M8zS8.p$hT.iBMLkcE_^16 EG; ͪ|Wn/+/(U8YN>{t`^`x&ty1{B- ֥pբB V- ! h2)g[_ŸƄԨ[ vClq1#}e4&?=W?,:sx'*, KXǎ| ~L u-9Zl 6dM|Ǥtc*;udTUvW6ArOԂH`xncd~auM6 1;7`*Y<8?n~ HJ=+T%AoQ+pxN]wsvL!UƖcuA[al#LjD2b\_RDDe]{qӦiݿ,_fc_8nښ/rU64), Q@e?]V]p\42|R( ~fQ:~C5 j 7 |g̹3.r_V7((qz+o^2Ftƨr.)_*KƗK.\o'm7w߮rG#t#L,s=oTL (~f3,yu7Wgt3ku"\G>_|lAlތ؏,f~h.$MW`B/h.3ռ c̟@˾h.deܓZmmᵻ0=PkUz@{@sThR3E8q!ķ^Y |-TUloۙad@g7?L0aMOVuӣ@-?=:E9a!0w'=L=Dqv j0?=R"6Nr!uk{#FaÓCṧ :]42nd駋4ǪTTX_/BxtqYƫ]9]ܗ"==ll)uWy.y<X>zmS];0?p] ó;td(ꪣD T8MR7-#L#XR1eDI̵sj1xB @ssEf7ˬ:͞-גsHa.ݛz r<IFy%ğ?{ƍ /gpJ.Kݪƥ ErIJ4HJD ‘eֈ>эnt[% 34%^THHi` ͼHE輩DrQ3aÚ9OSd<%9&1 VUYf?Jb?MDQRRSTt! q ߀HYOKA9Q-bW}VbjF9m9ՍQ+1?;I{Iys'ww}?Bx;Z -1qPOBx; 9E%(- j^My Qn[h%7o.-?&e"6" AQoOZ[`L en~FMn u6W GNjY]NZDFAt( XMnsYyR7˧~Qn|sk^{TuWޣTﮖP]vK1kTQT>p˒B>A'傞[Go#7utܰs]ay[9h -dh8y k*d-YE''͟n|!n$逴@:`ƺI$X RE? qvZ9 4Z'Οn܊,F:ve =[7gTʾ [f%F?uSHPerf MOVbq}30T҆O :]i*ߧF(0Q qeëP&cCyhaHpPhv`WUFUFU~|iZ1:-9Eh0~]Q/,W\vI6󲽻ͯHVfJIbXcNks|K 9$KxhtBjsçѰ?zg&!@_|crψZ'Rd% adܒy&#0o*T4rulXEg}%ʮ~B%XIE8w7s7EU~O;f2}`C@UR瓮ga>b ^_'"QqA2 <"wg?*؁(*= WEDꩂU!Tyy?H>/& |<1q;':(' %w^9+"wC*@,.㏝,hn>ewjv3o.>(!0njtmzPbD=ʺNb*8EslO<CLp%=uUe̅$D/DژΣlՖ&9\pmYݶ: UI0f`{Q1Q>Q˕8EFlT4egmP5FRk|NH '{"eCcgK"Z4Z-ۛ.=_v( &j߽`4`ڻe1K\>{06<6͞C&ݘZ={@trLYڀ>fl}//>#+++vܐ$Lܔ>%SPeŨveWxX+ŪNuEmq?܄qi^^ZX bp3*^ _x}o^]OT+^P*NBK]=C7#OoGFwm&D+J9C/({+Zw"bSRm{yyY@uYC$em1( g)k)sCa6K&N:rm %y#cNqr!rݦ Gs?wMXǠ-*qǠd1hu Bb ^ѯެ9)дꤷSS%AY!_\OcX4ZjvD{h1"eԘY:(Ab.kF?A8L8 U|zhgTp|i0"}G2GZR;oA);=AkT=^vH "|Mfq{ivGg_,^v{zgƧ^zxi(r} w<95{j1PYE^rq|'l*N(oS=  cg!Jw썛_<ҙ,^߻nǏȆ/㳎:9|1~tXkb6xx )So˹;o~HM@; GӏN:sWtZf#(V!; RARQj qFIFg~d| ؏?v,6.]? 
mo{d@*EHR|)1HR I"HKv$$aQH/(D߆ϡl3I;o|dյZ ̢EBb`zڄ9 C2O5M`WyX-dqi4ܯBC[M.3ITBKReO1Kuʔymow~y7t; ?䧋0)F'tۙ֝S6 v 58{XF1_Lo~xG:88ZN,7ˇ¯/-wb\:g,g"ψA cIC3fV#ewDgY*e6[:0NHsU `ZĺQ@b,u Y{7Eo;s, 5F,d;{Qw]rXm[7 x㧟$7~XՆvt%[ =rfPu 7 0h }oR`m,{ޛE7_›[篻y;gfixFs4`to~Ӗ!vo(맧 Cc+_nb<*2J&Zm(p_|gƽ}=|?/gq R(S+uG˔ ZEYG'q])Zv\=>s_4[R]wA_X?:p r!ί'VJR^/?%DY b/gB#`YˎnC@PjVQO`aQJb FBOnAF wa \cZxlݎCd`^OBʛ4_XI( EUXJ1u*SXY2HO$Gi2GHkEjP>X菝n͇ _y24LYљɤ߳I gL'kYkj}2Eᒦь'qofVcpR;y L{L3X$ǽoAm.C,No-9vMk&1I '?$C~8e:I)3EX=;RR3 &kY*bff `D `s09FCnLxy"No 8YCwRf])^f`q )դd XL+6UrmtV1ҷYt.69α ]|R2')mMRP$"h-{2gZ#=%VKͨaASe-ۖb)t}6ۖbahA鼷J+?)`[Q*,ު~iPUjIJ|]ʁ]0U-x ڹ`3ݍ?'(ZX gPV"i*0? ٲN:>!Y*)kC0ҳ"'Ñ1 y=uǍKu|"e5d 4<024Yy΄-`JJsȃ CQ;boc&nH)AIݬ/3.OP \Р)"dgBUAMhٜq#;Q3 MrnmLn+6řxqD* Q,3 #˂!qQlzL,ifiCĔz-U*ɺ8G/qbKfpӁtgIy?ar.,>8^/ժlx '3-Q;T`z5irVz)|-Ðc,-R=Zշ6'p&$>\@ןꥯ:n/AMbG zmJZx,_l>W!n2#4E.VVK^)x弡o)xt8:"K,3'4yPEGSһqz/v!@0SJ.'akViK*A\8yP̸E j{oRۅH-1ئXO}%kIH[LmԢ"A5CbJUP]~ lJjr@j* +BU+u$sI&KL'EڌCZxKU)ĒkM,fdHH*zs4l_i۶jBXD/Henra ECT"G6eSU}DPq$T5'zK%5|IĽ &;Hz.]wסgU+k:쭐4A!HC:]Tq*KMj ز*8 BJ9OC9)B wNgōciQnAx0玆p9bo9?ȌeZ|k˔Y2Aٖi Z^m"ֵ&OD5 \4dsAjnӘd-,:Ik[#=5MX6\8+Jױr LQY(yV`6c(JW4q=)Rd}d0ńB@ߜoӠU Ae|&VM&EI@2P  dY! S̊x#Ô2沣dX^Y^$A3, DpCܛ5˖KmUK,f1b`5gHaU++Q \ Ps)&aC~ZT@ 7}dpR` VVZ%)6 jrVȪ!H:FtPHeɃ x$}Z՝/ˮts&޶#ޖ)bL'c{ϗ#XAs`%t2([:!8=yv긥{gĨE^(zr{rs,'S6r!(x$%dcSLzdeK" OGmcu(E`$B<𜳲%Rh1S(Q_BH-4ߦQ:Q͂t$ER )6ҧLCB@"תyXQҷ7& 'JnzLFvR&O#1[g}u9i,ξ{~L*ol=ڷ'wqGOOQÑ#LMBJg'{wQ#ϦAG_l[sZId%}Ĵn\9:z<akW|FNd1yUo:pP >8Ѿt{ѐQUty@)t.dCPǜQnܩ;|Ip1enA3BH)*PE*L+oyɝvuda7ʦ.X7E$O\gWisvc,պ΄ҠhfH T1,U0Z$7(D(9EZYZS6zLGc:.9K2/"[x4å0q^L*]#Ta'^i^jz4&FռTFj?IB X5Rt$ Y5VO'lQҵZu{~rM~2Mˣ/mթ]fWm[=%H[Žt C8\XWoac=+[)Zl;/V\"uxyB* c(R+=i e&*΁QH&"m[+4Lǟ5qq##0גwSZ KxcP,r#Z0 CY>"zv4v9i(ȿ =h̭9w[:WB͇~ܷeuKCKX{>~%@]D>]^HiܨB$g&ҧ0aʨO +#i]|?t?Dw=Ky=;#38V@}OUiW@̌r6On[?2I6?~BYIs5%i 1mɸH틑3 1q󚗫mU'n$GI.LjOH{Jn}\\cU^|,wA\ ){ Q˚9s%0:ħKi>)Bn[)=H;mR[[O'})B'L Wl_;G%YʦG5<{_jhbjj/vP 9S-3ګQ7=VKL\ʣ,g0bNjm '8bݕsًϽF?c1Ѭ:1r'x`g\`|)D3 iA ANg&YG ymm Vw},Sq ?kAqӍUg5_y׊#7#Iz]]=wƄx -]g>5{,8X=^ve9B*~_̑O&G.;E&)2̐sBWNX R_1t5Vާ6>9U7ơgw gl5~$㕖M"U۫x-B:b2usWՕ+97v?v&pkވZLQmݼP{LAG?У)dPgu1=;=]JN|ߌNXe$'yPfS'wf6"܆,<};%O8O'. 52ّ*VQ]9QxiO ;& ,qM^>Ć u<ڬ7G5<"-.R3Uu=]UG\7qGo k|}pv=? W:,HZc\*'n/{f m1rJ77IbT]^ c5a=W9̽0h--@Kpoz?+t[:(z3}S\0o1=>g7D7R8fv9/SFFVwMSylߕ ,?{Pd})*0u#39ȷS},dߗOe$2|m =goߒld~hh}SvU7LJM0/'p~J*jLvG里s!_J9 j+%FDiWq ˸LjI8F8S5^|ꮵ<v P1~:>[~u'Tt$`Cq,]y"D+ƨh)V^)׷# YMh cRI)+NzJQ XLb2+1KG 0˔~~t+4*#R%6O'.>6!Rge4FkOfTk5Ij:n6/ 68J)͂7^T*4F!:,Sy`f4+lךq>jd!6 q K(Q3GPy&ķ~x r3o1w &"rBJ ~bf5LPKNU)$;+"-epIq-FlhVݺ|0cog).X;ϔ(>K-^W0w0٘?PG2VVwD"M ^H4qAhL;y>jR@`k'ג:oC5#L-ҝum{ۦn宧>"""jw~\:_Ҷm+z)iPt i*Tzh$%$CEN!*9h({BC Tx9m6hPN鄅H @KckAhm` ON:|c EX00.K,btJd"!$@&! r1"Q@G;rׁ{1c*TT=m0\/-us\/f3E9OeNnwwx|C|H)WBJо׿%Ey97` ҥMn5p"@Xo\7/q:\EwU/|@m P cQg-NKNP+1ewkXsq}Z`W@me󴼷)iY75O$`_'2s.eqL[ZZ=_SCӣZ[Z˥=sUvO"Pegn2sJڠp܆.f4 ׫e!ϤܰL"1O33-1nE`B!L ,T<55yvj6)./{댎Q‘ci@ i`Y$q /&C9%OhԄ[XQ#qK6<jCt:.#*I-Vuf k6{[~xWd{zEC љ1)5㨂˗rAe8 $۰ܹô[߀w[4 B|J' 7?c>O>݂ruSX;݆hhpPJه|6}ʁwLluJK$ γP >`fopuyɐݓz&ί"O<|bE{,u,szޛ4KX?ޅ/kux'F0!xGkgN2 M )(]am Eq=(vp|{ k@bhwר<5Zӯwq A2)=WKjn'`T C ]S9)-u$Bhs'TbpP4Q=R# x3^yU#He#y2%v1,4Cc9W];ߏ g5rXY{TTaLQk=f7D`"Nl. 
l .7=9f"-XӄR ͇b(-G ՌR}ݐLJV=Q_LAD['_n*QFwϕ/˙Lظj 8'>s98mZI@we=n~99hb7ƈ'c(-Ɖ}~ZfZinvS=78^FTw"YUeu}nxDX1 U}徾Yw,mh2!bMD.rzZ9-|Ŕ+]PQ'zzeMw^hA(H[=:M}3!im9;V+rmfMgi9f@9f#?dQb;ho玹ν!2!5Pu4PCo}jdn拫 Af{] {?Š2YخAJ+x/zvױ{.G)S6%JT\p[}3s/}kHF,xw}\ F%}7,(ô7lB%ba~%o?6Қ̧;̿c* s"岩}F=LVC=LL`2w`Y5{FwckrݣkP]IXxǘ!!rr-žC vqD놛2&' zx+m" 6~҂OT'؊`ISl؍(5`>,C堜 +.;HoWyk 6gŧFl̋2m]x]dVÏ8=3kLˆ)Lj(.{jP8$ 0iS$|Lj Q[)xe(P=(Uk W;AGub6\v'x(~RwCmO> Wc7*Hh pm_t?I 39HxbYߒ퇚 B6HߛbfMmqop#v-5  Q`#D Dt#6XU;ՊJzH+{N" xw*N# c?MJ"NwM (f)ťpY\ gͥF08ˤ@8Cj!k+uFQF0jRr,%lv綻aòɄ!f1m Xʶۂs_Yn)2Iq]Rr:rϰSaz<`ǒb5JJy¡NLjnocтk,؁`$ ?3W4Að(&kj9Ι`0cޢmlCʢ`mguD-lyA`$Q :8ՆH,0\CѴ>nm1a/iٟ&uzGye]jh˂怴 bȁF$|RYpXMb ]qp`dL k-4MVZM2}-8~Q: eղv0js=J]c8ơ&?8 5ҷvP`Pe-f;G[eF@g;΍bD\CqEsr L{#o gk%cPPo5k:b`)08eV4wRa l!RS LzMD ru~طر-G9 P$'lْ6aejD3 LgC*ʠMGHj7)h\ Rt4Ow;flz}<~殄lO5h?œ:wr-Ͷb[J 'oIMuIEƷaҼ`r OF_mZp")J)垡^za@`M&DMg2ރ|^WƄ[h9<ʟ e9(8DlgB%b/oCq^u& dYw ӎ2NgKgKV+6W 1c$$LcsUq"~$!|c+1CΙ, {.QªFnHva nH!ws7$cI4ص4xdJ0>Bx0D&<ԅd@OƑJ ;Z)NamCC=ŏdTRyZ^Oe9FSI# z5x*Jʄzd8?)2( IvIÁCQ *`dA8δDzd#D)ba,H_ $5{ֈCv7uwϭ A5?h::9L, DVakEÅ4KPlyTތJ,# 5av5UB;Xʰ_RA4Sp-@앑 xlhaBe+M޾aԝDЁZь&??D?wϠ;T< IT+Q 3(+?Q(Av5KiuZb|AebSKUǦ)dp 0U]|i#SvDjDPj%oSq3EGw։T1ܨܚ˾[Y>Ԛ% Z&%m uhFqҺkM?G`s>\w}J:aK:P9q40[)Qs8>EO-HUFHU@iX QüQ#j@VJhf3L<-Dx Q@iWύ0ru5V_/ c!'uBΨ oTӴ64Ku̱C{bznWβA7=]vujh&g x0bdѮv6^xAC"4v`bA,mB@CaiT+sゐ>], ⣂Ԭ";81siS`ي +p,,Aeͨ<5vs}Ր5+AXbNA~ '3ډ0KhK 1~$Ú6EuPk[{Η1/:æcU&ݝk R`T7;iVc< 5WYS E_?}Hc8c宲_~%=( 3XOEcBDm{cT%0)2sN\"{DJܴrJ _O i! ~T&'kFg~2zJ֤qP`5CsjU\s~c2Ep0ݨVuGɕ+m @;Y Ri :Xg?|+16zjLk77! us+4u0z*S!sih3arVrN 9'QbT Pkz3\kq۹%buu1^/c~q˙mL: d+y۷"r>FŽV Cc'b|g~و 08bJ*#=Q)x9:bky;^={`|;I 6œpObcF>D0Yi=waoy>O?_FDOBV4Ҧz`EZH6 ~>YH>j :P=h$Axcc'ܔXJ-cw 54*ߤ.˟ٛEj,E(f'<+H ҁٖN9Å;&,'; Hf7 ,Nvj ,N6/׆ ?gΞU*XAQA}|Nfj۵7ri&=́s7VafW:^U ͋g7V/9(J/@rg<\F6`+GقnMSrD6 JA?Po_O+gM=joT~6 _zlj}}~ӯS\ՁwlX][sF+,̦fZU~=SSN űH)$e9%AH64CK$prs)ŮƼhV $m-$ s/YɆ鐍#iq5GḚIJdw ‰"yaRt Ua:ܤVJ~- (14D_DxPIjx*.Ƭ^ >y,sq4VhdsIC 9@DДؖHܱn`1"%ܙd ZDhF1XuxTx&: K`E`^w?k,tb`i*Tgc -!G4"雃{s2hQ F5Di]ۨ;"S6I, o3LV18QT`J A%m3"&T }A7AQ2'6v)rdJ }FcjGxEQ 4T^Gdp2Rt Yf0})fHZܠW_u [zed g3s/r`"D6~x5RFmQSӋ)Y:V Pر h<9b[!T$)BR (k[ӞBܧXh$47i1n\-TLxi-_broŜh;|[,cV5n k1(v'8˵Sښ/'j/UO?sܺ / FҳY↔ɩS"(sjsh/j2KBW@4Zhӛf HB)~y:-x7H\ .fOD6\DՉmcL۸ǰB1fDc% PQ* )ə1"&ZQi]pL%l/ZV< y9=OiJHL`?8QTp"deqG؉ CiHelKżo3GIn6<[e=^Ğx\q(GFҶl! 
o(ּicc6 ]'Ol)r@~&65n%1bʹˆ;C!JBL8>DKnRM$XvÁ#\u>n8Lћ%v= T-jpTRe{Dԝ% ֳMaWQLIѸ/AHRP8k+9 %ZV"^4pրTсb5c+{lkU82V]:})566n8Oˇ?MҲyGIj]O+:%BQضfBnynU'w2/=|*ߧkxF1OK>GyT^懯<DP&o&\z i"!{csWQ^Z1lJV>)&x6^Z+kqGa{0!=jqD]qиvgEw+rD\q޺LTWze*A uh(v%Hkc2_Yd'&V o)Zk _lnZ}6ݿM)z (磿O/xǫ/pc67O6"Ol^=Mv)ݽ  {ja$¸ZdXl|#{)cfpW}^hz/7YU}_|#<~M%FLQU,?ݹ~Q-&5v `TԌ6tגYΩb`"D־wËfH1%)Yǟ>퇷k1]zpMi}pZռ66:|QկL lKߌgX)Q!LIkW'ĥܽ8 {5h (e +_f9#QNRL^KM-4*s $N s MԔ62kMٗbͬ/bgsD/saA=g%DKum 54@] z.[˕I`6ţ^p>Od bo!"DP\AqQX=kEƌ "JC18)ϙeXRބ: gX3\fsg?x>-xx;0:`LQ}Zd -lX ΟwEwcXw0?AE<42!=].Be"tٻ(w+c /`+tVfPNts ,Ue<8˜F/j4|y%k40j{4.BEHI`$L),fN ؉D3<.+h9fg05&LG/j4|,0Ba 1ЄT唜5TuIf4o'g!38c$asq(_1}x98I^nnb&LX,giqF i8k=^Lr;`P T,F|>< *}FK \cWpDr= lrޝ&&rx%0פcՍHbOQ $גWRP޶T\$Etw )~^ܪuWځjgtGi 0w6֣+tn Th_i4T!lN+Yq>x koi50 Ac3ZP:Ϊп$ u[C m{5Uy5%Řb|u=!ѲW\\SN!D0#rMbВ;¤+@_V8Ux g,U[Kͼ2gvY -Iϵt1O<,_]=76N6<:׌"DJ;yz"f(hp.aEo߼h؊`BBu)|+%ƙ˥&+rm\Q}v?qc ̚Vc&%lY؀agKvl3 d4XɃ Ϸbqaj}y`jVAS,4F(Or`z`5e(Vir)O `[EUCܺ׋.ť9md2&J[[N}:,|GI EZZM nPpOZmHg }vKHhкc I2={L P$6B[svG"F*vEK2JMF 5ᠫfPAnfblz@id)'=#_]fc,rLc#{E).x5` Ge28탿,B~XJe۽#1ܬr}V+hGÙf Eq.#0h|Ԅ.uGcX"EQwzU0zѰQ2= =.O+`}5CR;ͳ"SL 0MUpHvӣRMx'Ι&j~x32,H &||ʏb_Ɠ^4yF3z~dz拕ve9ǘa-OM4Řx~yd/ƙ:'7 clt)<) iS y3le}Z)>4)lڼӇhƸ-etpqhTBTS,*)쩸~Pu1:4 ^H8Qmͫ)Ҳ_#|#!)f8vr6h8L70'24 ýOUʲ-NW[~_OD]-}5iǂymcL?l+  0u0 [E:j]Dn5j-X x=[e$b>i9ۭkO!NK.qt9|]c@I<S Ctf7ڏw#_>hᷡ 33=&ᝂ#H68'K;FLm|)z5 5 ({[VôVZD+VTH3k)QMH\"F"w5chr wx?oa!u%Uy G{`2!X-g_R;ݏnF>Kiq't&5x{{amb#:D"ÅY|7JlQKHHAJ7 xQԽ|I^jەz%m d)_kU=گ^4pClZCa$4> 1'8䬐1բB{-٠*a9)LH)LMIX2Spg.CcdfɄrɟlg!%m3f UP=;s*엷1uZ_TKZlʬåVzV&u!yT  Y fn-<ϐc]/߉k^ܲϘJ+1?{_}k2]wm徽՚ۦx׀xFP|37gxd jw]=Voqޟu;hlD=tܪŧ+Gb-k"{U[OW][O>gHx=HG72@۵+lضQ_۠ZePLI`Ͽk]y|[[VR/üY6RkF;nY6Nl6zɲGNAP eSt~g]"Z' 4?bO =hf6xWv4 CN%jDX6Ƙ_5H@h! 1x.0)M =qo7z`HTR+Vdj֗! N=`_>c&yЛts? Wp2md| g0? ]5L{"%HO̪zfuVT9glB^fTa_y:J雧ţ>;]=GoFkO=к y&eS~+uӉe^x\;6zoLמ.{u=]M`bp٫;x.=+ 2 /.prή9Ҟ~lmqɧ2 &h|94pd7o/~3㫛?*5K6w5ჇV2o v㟩 4˧+`TbŴ%צy $h"XCO9wߘf#_Zʑ].BͺbTz[- .^ xxm%˗H=J{wۧ),j t(a^8zf|bdǏ/>7xl'$o/R>F] pEgЛO;7f2ۜLwFsd~x듀k-|ǐ =}{pϘ1z|1S73,E#t$׽F^l@U^RuEpܚSZ[Hc!o>Ͳ4{i F* 9\T:eP4GDik"|A:?rhPk-r6˸Dt uuY@, sm;~;ʛSO=LMQ@.:}!+qtq++"X5 o.0iYI묰3<3Xj2k ^ 5gQjQMŖ;| +)Vc^mrcagxeAzFgATT7 ȴ׀hӰqMafK$8v֌Jrw7p9 OX\_T00#l?~FF'0:Ƃ!D me,d 5աe);U.jWD,VGi pԚ!`,t A0Qߕח6GLi 6cEa3c^չ=!$xZX}:]ukaO3ls>l}ęCV%'!H%EB/S_^)Gz:WrYiZ#ט,!myEz#ҒԗgFx}59q3_ղ HԱ#ya]%fktڝj0w%/V68 4XU pQ+_J:/; QT K"W!Gx A0IHxXXotp38:o,i,C1), b?{4""urƁ)ǕزtKCWNZ}7}q³D#{%Oau8}7-LA}>~4ra˱IjI%{)7e3Qo+m 4]" r  >L 7"_}N5)4Y2f$լ>TE,83]L8+sktaIk1ʍ <sO B厃KZńB8Q r`F0 ݢ_ZMN|!Lӌfad@idž{"`BW yFhIѥIb\%@9 X[kHcB`]}e_ t[ɵ(`,.ȀG]ZDIwzKYO/f9,Q@O.w7]7\`ǃQƕ9e4x;m xM30Bk4rE< oЦhWZn{O @ѿE'^cKɲ=U>sWUkRWjm ׆jJt6AaHbp"RHjW / s`R& hļoGj!EVzD#ZRQlSwb(֚j.H5vۑLZ*ayqZƜ(mPɨlL=?]s!&`rf\V@ `bU#Y+fr.\ AF Š)Y6 В*M[/`G!_a#gV%R.ώD/Yo! 
_K##^"S>`B}H35xi كL C SCT_Q-Hij?ƩjZE:-J[҇XZOZzZ* +eP6WCe+P:?S uþرaZ Ȓݟ#GqoA4M'ɘ^a,ՏqT#"4!ZԲ̚s%$ݩtl)5ouv/٥~{y1AFxV$k/'R"o!zכfP`BݻzP}xE&KuRM}BqT2*"ئ$5!F{K59P)v?x7=TK@x3龔P9t"`|uF^_c OH$Q[-ud].&vd]D;uB.#B.OHs(-ʫ~:iMڅV KhO_Jj =3AѲ%I-І-B8bkHHKY]P*eNq]<$e:Eggo 6_U):%Ta`m4 h*PM@x2 WG_Qmf1{ܺ )TM"~/A'vh'/כZOD];ߍ_nd,8Q-pv.{;^\Oexg#i}7,u?E*w= Xx|yk./~o"]l3V|Uy3-*5 A={(bczm۪] ؿNK0;PAL%uݍu!`|5rKxZ^*)|2-nLاF仠iAnw?WҔҒOC<~r}GmCq;'^Q3:c_QdlXNߖ۸5jltXr̄V΀I/1J\+!\E睟Y:`Zt5ģ5|G-Kw0 ˉڟk\ҵ!\ETnW=NJ5K*ju,!# J5?Vҵ!\Etbr'MŦRYS}N5(NYGu_GKS)ȋ!h{SsZ^K[uJ_%TR=*J+in׃RagDcm$8-ۇj"lDԤ6=ѶSook1}c,k)$] eŜ6R"=Y-Tr6u c.%ѡaҿ0bYooOq1;ѷ/q"L's IWD%n=s|OApw*((M J2x…N4gTS$O!E)dBͨ0(۹ sXeadw:L4%C8O`2mQD 2iF4yiʬW<š2MЎ ^*-O1IV9E c% 0rDL_˜cFJg;bb@DrD/dK/݃g A8FOσ7PB۴O9l[W3Fid*sMȴĩ.X4gE!Jsi&Am12υ;3W "f0yI5Ru1#J91CR-r  wMFY!Z$l.d^](?NﺚUVmX)p̒/S󭧗YwX̷Fk?bHx|uѼ {Y0t+ek  80+Mp?$1cN7%طADy bq YYV `b-C#60oĬ\ s%CT+5 HC,K L7?TIs H xȂ';,QZds#ή[4Ȱ`P.8jl`$2vzgý{AR7[^Bб87T괛loffs@?'̬J 㿏 z+#˅(f:V +'Zs:t1S h;⺃ﴽ;~2ҨJ$48 >ZZ@!PO! "!l>5tmo{7_~نV$I|YI6Vc)Jdƅ,aE*0ɵb6('FPv"GP@ 쀹mDɎ;UF1U'^ lU'a"H oQk(ib$X5 7[,CFp$<6TKLIul[m1k1.OP[0= {1r46]rL5CtfĚ-ʟn+X?X?$Wx&f-&g J( H!RZ@㖫}԰ہmFF)JB8JTr잲"I)H"Ms- ͘G CUfaK)L`Fa$jaY%տGc,8}q=S$r88!r8 J/y`V2]Y=nva2̊04ʅ/3m#$twPa=vn{hhh!Dfp*.{XyO о=Z[2EVLYURbQB5yFCئ i (fv3m9r*7OR&va>{i;oͨoް y&cS]OjpU11B<]L ޙ隰7nm nʆ~ۢc O'0Py0t%P_!j 4fqfcv=,P go#`Rin>1ϝ̕r\dlTӴ  yVl='V>cӎK_7;Bd}2_\+cf<*m~ۨk.FKa>:iF>'CY%{KU(Nn;Sh h'8Fz蘅=*q8|u$J%N#UR=*qjV1J) fPbCtcBZ1%}nB|9Uf9-XG0ąi!-R_lwɇrJC>s췄t6mQ0cƙ5Ed[#v VePbrG7e/INQ@&L-k>+}X5n^q\+.XXAcS3ZY?7amvN) V?$~zV;Jo꩹a?m jxr#ev5&%5]RS:˒=1f%U&5*` Ǹa\<][b(p {2' H d PhCnUJ"d`I)n6fxX_w"AH 20IG rp,pDBB,c1L6;gVeIe:"Qyp6:g%y҄E) ㊜3-,gx>*o }Z*QRyU)T[}|Cn>.?~|V4/?><_#1cw{d~ûQq7JR]3qwU}2/|Xf&s gDaMUFH/]Iq;H]C2(} \_1Fm:x*P(NJ)X:ܚ6۹XD9v8|#_yLAl*(rAmw'Ĭu,xj,b^,؊Sbai[s=Am>չ6?Ɇuҹ[:~e_Y=ݹ~eFD#u  Aq$NA ]X€ MYk6~*$Xv,AH"\5讼l(ZyS+ИSI<3a^y dy2&.y(E (/)dPh!!)9R(E)ɼ,1 HafjJ`5fL?E:i>`?+TqA8s+DNQY*5WR->"xI&p TqҜ E36C.ΔBy3VĚ*2"̼ mZzd #`P.A$a&6u0Rv}s.%vjt:^E@(/ !b )&Jd"IS5Efio&XLh%C ePT)21 NlHG3aJv1Jm,7 3wS!goCY*'lp[+Ŏ06R:Xp~P 5Sb}myPK(EF~IF5+4w%<("r Cb~y'/`s}f})u,M0ϋ օ]WUϖzSzSʊلU/LYI`}5Zys=!t>zweIqU}8³a,!gvv^k_FwfYg<ޢpXZ=.Kޟ ]|g:FPRf)7ݗ{Lĸ B(3\?6Zamʗ5rBq}/d܃Q @SdOL6U Le@9nvNʳ *۰pv,|sl "ZVݒ89Ȧl~?W*4;9ac6.dl1fUMhצ#檃.O5qZATG<קZI1֡lR-AĽ4?/PA>=O9V:6}6ݪmtOe;,i~zϪ.wdٿ8Zcʾ<{gLԞ~͈{0BYwmP*r[LLJm̺|rq܌q)Uv=tbcFcxV Vn-L˞.~5a!oDlJDw<lpkXyQ!ѯ¢3O5a!oDl 192.168.126.11:17697: read: connection reset by peer" start-of-body= Mar 12 13:20:59 crc kubenswrapper[4125]: I0312 13:20:59.704617 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47506->192.168.126.11:17697: read: connection reset by peer" Mar 12 13:20:59 crc kubenswrapper[4125]: I0312 13:20:59.705353 4125 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47518->192.168.126.11:17697: read: connection reset by peer" start-of-body= Mar 12 13:20:59 crc kubenswrapper[4125]: I0312 13:20:59.705426 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 
192.168.126.11:47518->192.168.126.11:17697: read: connection reset by peer" Mar 12 13:20:59 crc kubenswrapper[4125]: I0312 13:20:59.804897 4125 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Mar 12 13:20:59 crc kubenswrapper[4125]: I0312 13:20:59.804976 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Mar 12 13:20:59 crc kubenswrapper[4125]: W0312 13:20:59.963066 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:20:59Z is after 2025-06-26T12:47:18Z Mar 12 13:20:59 crc kubenswrapper[4125]: E0312 13:20:59.963155 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:20:59Z is after 2025-06-26T12:47:18Z Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.417118 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:00Z is after 2025-06-26T12:47:18Z Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.531931 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/0.log" Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.540510 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="9806c660ce1b507ceb49e8487c0307ff49366e4d1f2190e69f6df2d56cb605e9" exitCode=255 Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.540596 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"9806c660ce1b507ceb49e8487c0307ff49366e4d1f2190e69f6df2d56cb605e9"} Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.541534 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.543980 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.544282 4125 kubelet_node_status.go:729] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.544304 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:00 crc kubenswrapper[4125]: I0312 13:21:00.547622 4125 scope.go:117] "RemoveContainer" containerID="9806c660ce1b507ceb49e8487c0307ff49366e4d1f2190e69f6df2d56cb605e9" Mar 12 13:21:01 crc kubenswrapper[4125]: E0312 13:21:01.146783 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:01Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:01 crc kubenswrapper[4125]: I0312 13:21:01.348763 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:01 crc kubenswrapper[4125]: I0312 13:21:01.350797 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:01 crc kubenswrapper[4125]: I0312 13:21:01.350977 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:01 crc kubenswrapper[4125]: I0312 13:21:01.351002 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:01 crc kubenswrapper[4125]: I0312 13:21:01.351045 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:01 crc kubenswrapper[4125]: E0312 13:21:01.357656 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:01Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:01 crc kubenswrapper[4125]: I0312 13:21:01.379443 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:01Z is after 2025-06-26T12:47:18Z Mar 12 13:21:02 crc kubenswrapper[4125]: E0312 13:21:02.172779 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.553445 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/0.log" Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.556341 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69"} Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.556477 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.557759 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.557802 4125 
kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.557859 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:02 crc kubenswrapper[4125]: I0312 13:21:02.601186 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:02Z is after 2025-06-26T12:47:18Z Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.279555 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.279663 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.279777 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.279985 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.281762 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.281902 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.281924 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.284603 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"7205a6fa1483eb6319289da874dccc045a02b491aadb03f8eeaf2f732d1a9165"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.285317 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://7205a6fa1483eb6319289da874dccc045a02b491aadb03f8eeaf2f732d1a9165" gracePeriod=30 Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.381120 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-03-12T13:21:03Z is after 2025-06-26T12:47:18Z Mar 12 13:21:03 crc kubenswrapper[4125]: W0312 13:21:03.414869 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:03Z is after 2025-06-26T12:47:18Z Mar 12 13:21:03 crc kubenswrapper[4125]: E0312 13:21:03.415025 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:03Z is after 2025-06-26T12:47:18Z Mar 12 13:21:03 crc kubenswrapper[4125]: W0312 13:21:03.505942 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:03Z is after 2025-06-26T12:47:18Z Mar 12 13:21:03 crc kubenswrapper[4125]: E0312 13:21:03.506019 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:03Z is after 2025-06-26T12:47:18Z Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.561552 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/1.log" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.563498 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/0.log" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.569087 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" exitCode=255 Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.569144 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69"} Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.569198 4125 scope.go:117] "RemoveContainer" containerID="9806c660ce1b507ceb49e8487c0307ff49366e4d1f2190e69f6df2d56cb605e9" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.569388 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.570871 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.570917 4125 kubelet_node_status.go:729] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.570932 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.572198 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:03 crc kubenswrapper[4125]: E0312 13:21:03.572562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.581151 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/0.log" Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.581983 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="7205a6fa1483eb6319289da874dccc045a02b491aadb03f8eeaf2f732d1a9165" exitCode=255 Mar 12 13:21:03 crc kubenswrapper[4125]: I0312 13:21:03.582087 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"7205a6fa1483eb6319289da874dccc045a02b491aadb03f8eeaf2f732d1a9165"} Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.185095 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.384284 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:04Z is after 2025-06-26T12:47:18Z Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.595332 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/0.log" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.596572 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"db362b1e057f0470cd5943bb3ee51a9a56041ade8bb719799d8d16e38edf3cc0"} Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.596661 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.599060 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.599183 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.599378 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:04 crc 
kubenswrapper[4125]: I0312 13:21:04.602317 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/1.log" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.606226 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.607896 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.608039 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.608074 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.611093 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:04 crc kubenswrapper[4125]: E0312 13:21:04.612646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:04 crc kubenswrapper[4125]: I0312 13:21:04.762487 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.382364 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:05Z is after 2025-06-26T12:47:18Z Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.610323 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.610430 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.613612 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.613759 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.613794 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.616279 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:05 crc kubenswrapper[4125]: E0312 13:21:05.617112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.617257 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.617286 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.617310 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:05 crc kubenswrapper[4125]: I0312 13:21:05.625373 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:06 crc kubenswrapper[4125]: I0312 13:21:06.385290 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:06Z is after 2025-06-26T12:47:18Z Mar 12 13:21:06 crc kubenswrapper[4125]: I0312 13:21:06.615191 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:06 crc kubenswrapper[4125]: I0312 13:21:06.617094 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:06 crc kubenswrapper[4125]: I0312 13:21:06.617359 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:06 crc kubenswrapper[4125]: I0312 13:21:06.617541 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:06 crc kubenswrapper[4125]: I0312 13:21:06.620591 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:06 crc kubenswrapper[4125]: E0312 13:21:06.621641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:07 crc kubenswrapper[4125]: I0312 13:21:07.386388 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:07Z is after 2025-06-26T12:47:18Z Mar 12 13:21:07 crc kubenswrapper[4125]: I0312 13:21:07.520101 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:21:07 crc kubenswrapper[4125]: I0312 13:21:07.520309 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:07 crc kubenswrapper[4125]: I0312 13:21:07.522219 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:07 crc kubenswrapper[4125]: I0312 13:21:07.522355 4125 kubelet_node_status.go:729] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:07 crc kubenswrapper[4125]: I0312 13:21:07.522625 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:07 crc kubenswrapper[4125]: E0312 13:21:07.657382 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:07Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:21:08 crc kubenswrapper[4125]: E0312 13:21:08.155503 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:08Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.358684 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.360359 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.360445 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.360496 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.360536 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:08 crc kubenswrapper[4125]: E0312 13:21:08.365528 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:08Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.380040 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:08Z is after 2025-06-26T12:47:18Z Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.624675 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.625096 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 
13:21:08.627128 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.627261 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.627293 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:08 crc kubenswrapper[4125]: I0312 13:21:08.630519 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:08 crc kubenswrapper[4125]: E0312 13:21:08.631584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:09 crc kubenswrapper[4125]: I0312 13:21:09.382630 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:09Z is after 2025-06-26T12:47:18Z Mar 12 13:21:10 crc kubenswrapper[4125]: I0312 13:21:10.279493 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:21:10 crc kubenswrapper[4125]: I0312 13:21:10.279951 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:10 crc kubenswrapper[4125]: I0312 13:21:10.282348 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:10 crc kubenswrapper[4125]: I0312 13:21:10.282447 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:10 crc kubenswrapper[4125]: I0312 13:21:10.282478 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:10 crc kubenswrapper[4125]: I0312 13:21:10.384402 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:10Z is after 2025-06-26T12:47:18Z Mar 12 13:21:11 crc kubenswrapper[4125]: I0312 13:21:11.385612 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:11Z is after 2025-06-26T12:47:18Z Mar 12 13:21:11 crc kubenswrapper[4125]: I0312 13:21:11.519010 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:21:11 crc kubenswrapper[4125]: E0312 13:21:11.529274 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing 
request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:11Z is after 2025-06-26T12:47:18Z Mar 12 13:21:11 crc kubenswrapper[4125]: E0312 13:21:11.529457 4125 certificate_manager.go:440] kubernetes.io/kube-apiserver-client-kubelet: Reached backoff limit, still unable to rotate certs: timed out waiting for the condition Mar 12 13:21:12 crc kubenswrapper[4125]: E0312 13:21:12.174118 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:21:12 crc kubenswrapper[4125]: I0312 13:21:12.383084 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:12Z is after 2025-06-26T12:47:18Z Mar 12 13:21:12 crc kubenswrapper[4125]: W0312 13:21:12.451000 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:12Z is after 2025-06-26T12:47:18Z Mar 12 13:21:12 crc kubenswrapper[4125]: E0312 13:21:12.451227 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:12Z is after 2025-06-26T12:47:18Z Mar 12 13:21:13 crc kubenswrapper[4125]: I0312 13:21:13.280596 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:21:13 crc kubenswrapper[4125]: I0312 13:21:13.280976 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:21:13 crc kubenswrapper[4125]: I0312 13:21:13.380030 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:13Z is after 2025-06-26T12:47:18Z Mar 12 13:21:14 crc kubenswrapper[4125]: W0312 13:21:14.067388 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:14Z is after 
2025-06-26T12:47:18Z Mar 12 13:21:14 crc kubenswrapper[4125]: E0312 13:21:14.067536 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:14Z is after 2025-06-26T12:47:18Z Mar 12 13:21:14 crc kubenswrapper[4125]: I0312 13:21:14.386023 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:14Z is after 2025-06-26T12:47:18Z Mar 12 13:21:15 crc kubenswrapper[4125]: E0312 13:21:15.164197 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:15Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:15 crc kubenswrapper[4125]: I0312 13:21:15.366428 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:15 crc kubenswrapper[4125]: I0312 13:21:15.369804 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:15 crc kubenswrapper[4125]: I0312 13:21:15.370076 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:15 crc kubenswrapper[4125]: I0312 13:21:15.370115 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:15 crc kubenswrapper[4125]: I0312 13:21:15.370172 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:15 crc kubenswrapper[4125]: E0312 13:21:15.378987 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:15Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:15 crc kubenswrapper[4125]: I0312 13:21:15.381403 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:15Z is after 2025-06-26T12:47:18Z Mar 12 13:21:16 crc kubenswrapper[4125]: I0312 13:21:16.383244 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:16Z is after 2025-06-26T12:47:18Z Mar 12 13:21:17 crc kubenswrapper[4125]: I0312 13:21:17.384338 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:21:17Z is after 2025-06-26T12:47:18Z Mar 12 13:21:17 crc kubenswrapper[4125]: E0312 13:21:17.667132 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:17Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:21:18 crc kubenswrapper[4125]: I0312 13:21:18.381968 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:18Z is after 2025-06-26T12:47:18Z Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.025591 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.028378 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.028528 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.028563 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.031104 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.385296 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:19Z is after 2025-06-26T12:47:18Z Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.685114 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/1.log" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.689141 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad"} Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.689311 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.690443 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:19 crc 
kubenswrapper[4125]: I0312 13:21:19.690503 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:19 crc kubenswrapper[4125]: I0312 13:21:19.690523 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:19 crc kubenswrapper[4125]: W0312 13:21:19.913551 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:19Z is after 2025-06-26T12:47:18Z Mar 12 13:21:19 crc kubenswrapper[4125]: E0312 13:21:19.913642 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:19Z is after 2025-06-26T12:47:18Z Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.381398 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:20Z is after 2025-06-26T12:47:18Z Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.698035 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/2.log" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.699288 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/1.log" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.704166 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad" exitCode=255 Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.704260 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad"} Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.704419 4125 scope.go:117] "RemoveContainer" containerID="e412857329820a1f7cc1e4502f3eccaa2089e4072ae6db1bf1fd1ccfb72bbc69" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.704622 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.706640 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.706709 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 13:21:20.706765 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:20 crc kubenswrapper[4125]: I0312 
13:21:20.710945 4125 scope.go:117] "RemoveContainer" containerID="c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad" Mar 12 13:21:20 crc kubenswrapper[4125]: E0312 13:21:20.712309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:21 crc kubenswrapper[4125]: I0312 13:21:21.379108 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:21Z is after 2025-06-26T12:47:18Z Mar 12 13:21:21 crc kubenswrapper[4125]: I0312 13:21:21.712778 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/2.log" Mar 12 13:21:22 crc kubenswrapper[4125]: E0312 13:21:22.174063 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:22Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:22 crc kubenswrapper[4125]: E0312 13:21:22.175054 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:21:22 crc kubenswrapper[4125]: I0312 13:21:22.380117 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:22 crc kubenswrapper[4125]: I0312 13:21:22.382297 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:22 crc kubenswrapper[4125]: I0312 13:21:22.382481 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:22 crc kubenswrapper[4125]: I0312 13:21:22.382533 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:22 crc kubenswrapper[4125]: I0312 13:21:22.382601 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:22 crc kubenswrapper[4125]: I0312 13:21:22.382651 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:22Z is after 2025-06-26T12:47:18Z Mar 12 13:21:22 crc kubenswrapper[4125]: E0312 13:21:22.390653 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:22Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:23 crc kubenswrapper[4125]: I0312 13:21:23.279652 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc 
container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:21:23 crc kubenswrapper[4125]: I0312 13:21:23.279943 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:21:23 crc kubenswrapper[4125]: I0312 13:21:23.383861 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:23Z is after 2025-06-26T12:47:18Z Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.185003 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.185200 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.187418 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.187703 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.187783 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.190901 4125 scope.go:117] "RemoveContainer" containerID="c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad" Mar 12 13:21:24 crc kubenswrapper[4125]: E0312 13:21:24.191636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:24 crc kubenswrapper[4125]: I0312 13:21:24.382102 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:24Z is after 2025-06-26T12:47:18Z Mar 12 13:21:25 crc kubenswrapper[4125]: I0312 13:21:25.383283 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:25Z is after 2025-06-26T12:47:18Z Mar 12 13:21:26 crc kubenswrapper[4125]: W0312 13:21:26.074267 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get 
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:26Z is after 2025-06-26T12:47:18Z Mar 12 13:21:26 crc kubenswrapper[4125]: E0312 13:21:26.074424 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:26Z is after 2025-06-26T12:47:18Z Mar 12 13:21:26 crc kubenswrapper[4125]: I0312 13:21:26.382367 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:26Z is after 2025-06-26T12:47:18Z Mar 12 13:21:27 crc kubenswrapper[4125]: I0312 13:21:27.382604 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:27Z is after 2025-06-26T12:47:18Z Mar 12 13:21:27 crc kubenswrapper[4125]: E0312 13:21:27.676334 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:27Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.384253 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:28Z is after 2025-06-26T12:47:18Z Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.447722 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.448028 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.449457 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.449544 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:28 crc 
kubenswrapper[4125]: I0312 13:21:28.449560 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.626084 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.626451 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.628223 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.628273 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.628432 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:28 crc kubenswrapper[4125]: I0312 13:21:28.631096 4125 scope.go:117] "RemoveContainer" containerID="c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad" Mar 12 13:21:28 crc kubenswrapper[4125]: E0312 13:21:28.632650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:29 crc kubenswrapper[4125]: E0312 13:21:29.183457 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:29Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:29 crc kubenswrapper[4125]: I0312 13:21:29.382111 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:29Z is after 2025-06-26T12:47:18Z Mar 12 13:21:29 crc kubenswrapper[4125]: I0312 13:21:29.390936 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:29 crc kubenswrapper[4125]: I0312 13:21:29.393686 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:29 crc kubenswrapper[4125]: I0312 13:21:29.393991 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:29 crc kubenswrapper[4125]: I0312 13:21:29.394047 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:29 crc kubenswrapper[4125]: I0312 13:21:29.394264 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:29 crc kubenswrapper[4125]: E0312 13:21:29.401597 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2026-03-12T13:21:29Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:30 crc kubenswrapper[4125]: I0312 13:21:30.382529 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:30Z is after 2025-06-26T12:47:18Z Mar 12 13:21:31 crc kubenswrapper[4125]: I0312 13:21:31.383609 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:31Z is after 2025-06-26T12:47:18Z Mar 12 13:21:31 crc kubenswrapper[4125]: I0312 13:21:31.408609 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:21:31 crc kubenswrapper[4125]: I0312 13:21:31.408925 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:21:31 crc kubenswrapper[4125]: I0312 13:21:31.408985 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:21:31 crc kubenswrapper[4125]: I0312 13:21:31.409051 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:21:31 crc kubenswrapper[4125]: I0312 13:21:31.409108 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:21:32 crc kubenswrapper[4125]: E0312 13:21:32.176333 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:21:32 crc kubenswrapper[4125]: I0312 13:21:32.383611 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:32Z is after 2025-06-26T12:47:18Z Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.279129 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.279381 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.279503 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.279984 4125 kubelet_node_status.go:402] "Setting node annotation to enable 
volume controller attach/detach" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.283068 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.283194 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.283230 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.287064 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"db362b1e057f0470cd5943bb3ee51a9a56041ade8bb719799d8d16e38edf3cc0"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.287897 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://db362b1e057f0470cd5943bb3ee51a9a56041ade8bb719799d8d16e38edf3cc0" gracePeriod=30 Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.383897 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:33Z is after 2025-06-26T12:47:18Z Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.777604 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/1.log" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.779731 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/0.log" Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.780475 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="db362b1e057f0470cd5943bb3ee51a9a56041ade8bb719799d8d16e38edf3cc0" exitCode=255 Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.780550 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"db362b1e057f0470cd5943bb3ee51a9a56041ade8bb719799d8d16e38edf3cc0"} Mar 12 13:21:33 crc kubenswrapper[4125]: I0312 13:21:33.780603 4125 scope.go:117] "RemoveContainer" containerID="7205a6fa1483eb6319289da874dccc045a02b491aadb03f8eeaf2f732d1a9165" Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.382610 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:34Z is after 2025-06-26T12:47:18Z Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.790526 4125 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/1.log" Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.794733 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"0558473c01f993ce455f1c37f700d50b8eb36a3a54afcff7192c0669afaa68d4"} Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.795049 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.796696 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.796750 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:34 crc kubenswrapper[4125]: I0312 13:21:34.796776 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:35 crc kubenswrapper[4125]: I0312 13:21:35.384758 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:35Z is after 2025-06-26T12:47:18Z Mar 12 13:21:35 crc kubenswrapper[4125]: I0312 13:21:35.800050 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:35 crc kubenswrapper[4125]: I0312 13:21:35.802145 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:35 crc kubenswrapper[4125]: I0312 13:21:35.802386 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:35 crc kubenswrapper[4125]: I0312 13:21:35.802426 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:36 crc kubenswrapper[4125]: E0312 13:21:36.191149 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:36Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:36 crc kubenswrapper[4125]: I0312 13:21:36.382146 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:36Z is after 2025-06-26T12:47:18Z Mar 12 13:21:36 crc kubenswrapper[4125]: I0312 13:21:36.401774 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:36 crc kubenswrapper[4125]: I0312 13:21:36.404236 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:36 crc kubenswrapper[4125]: I0312 13:21:36.404582 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:36 crc 
kubenswrapper[4125]: I0312 13:21:36.404661 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:36 crc kubenswrapper[4125]: I0312 13:21:36.404721 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:36 crc kubenswrapper[4125]: E0312 13:21:36.413161 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:36Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:37 crc kubenswrapper[4125]: I0312 13:21:37.381650 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:37Z is after 2025-06-26T12:47:18Z Mar 12 13:21:37 crc kubenswrapper[4125]: I0312 13:21:37.519702 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:21:37 crc kubenswrapper[4125]: I0312 13:21:37.520125 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:37 crc kubenswrapper[4125]: I0312 13:21:37.522158 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:37 crc kubenswrapper[4125]: I0312 13:21:37.522359 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:37 crc kubenswrapper[4125]: I0312 13:21:37.522395 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:37 crc kubenswrapper[4125]: E0312 13:21:37.684403 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:37Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:21:38 crc kubenswrapper[4125]: I0312 13:21:38.385296 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:38Z is after 2025-06-26T12:47:18Z Mar 12 13:21:39 crc kubenswrapper[4125]: I0312 13:21:39.382003 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-03-12T13:21:39Z is after 2025-06-26T12:47:18Z Mar 12 13:21:40 crc kubenswrapper[4125]: I0312 13:21:40.279140 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:21:40 crc kubenswrapper[4125]: I0312 13:21:40.279592 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:40 crc kubenswrapper[4125]: I0312 13:21:40.282954 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:40 crc kubenswrapper[4125]: I0312 13:21:40.283506 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:40 crc kubenswrapper[4125]: I0312 13:21:40.283770 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:40 crc kubenswrapper[4125]: I0312 13:21:40.381515 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:40Z is after 2025-06-26T12:47:18Z Mar 12 13:21:40 crc kubenswrapper[4125]: W0312 13:21:40.479420 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:40Z is after 2025-06-26T12:47:18Z Mar 12 13:21:40 crc kubenswrapper[4125]: E0312 13:21:40.479573 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:40Z is after 2025-06-26T12:47:18Z Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.024979 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.026681 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.026980 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.027022 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.029452 4125 scope.go:117] "RemoveContainer" containerID="c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.378675 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:41Z is after 2025-06-26T12:47:18Z Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.843114 4125 logs.go:325] "Finished parsing 
log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/2.log" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.850494 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5"} Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.850998 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.853192 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.853318 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:41 crc kubenswrapper[4125]: I0312 13:21:41.853450 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:42 crc kubenswrapper[4125]: E0312 13:21:42.177504 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.383590 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:42Z is after 2025-06-26T12:47:18Z Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.859739 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/3.log" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.861965 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/2.log" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.866803 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5" exitCode=255 Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.866985 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5"} Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.867069 4125 scope.go:117] "RemoveContainer" containerID="c69e0cbc6232ddf9b9b854f1d10e670dc1d0ed3cea2fd0ea0af003d0012e4fad" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.867318 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.870139 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.870224 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.870256 4125 
kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:42 crc kubenswrapper[4125]: I0312 13:21:42.872550 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5" Mar 12 13:21:42 crc kubenswrapper[4125]: E0312 13:21:42.873374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:43 crc kubenswrapper[4125]: E0312 13:21:43.199006 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:43Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.279390 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.279575 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.383620 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:43Z is after 2025-06-26T12:47:18Z Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.414027 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.416282 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.416404 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.416443 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.416502 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:21:43 crc kubenswrapper[4125]: E0312 13:21:43.424292 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:43Z 
is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.532045 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:21:43 crc kubenswrapper[4125]: E0312 13:21:43.541028 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:43Z is after 2025-06-26T12:47:18Z Mar 12 13:21:43 crc kubenswrapper[4125]: I0312 13:21:43.876987 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/3.log" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.185308 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.185634 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.187731 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.187984 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.188022 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.192297 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5" Mar 12 13:21:44 crc kubenswrapper[4125]: E0312 13:21:44.197171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:21:44 crc kubenswrapper[4125]: I0312 13:21:44.378728 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:44Z is after 2025-06-26T12:47:18Z Mar 12 13:21:45 crc kubenswrapper[4125]: I0312 13:21:45.382500 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:45Z is after 2025-06-26T12:47:18Z Mar 12 13:21:46 crc kubenswrapper[4125]: I0312 13:21:46.379723 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
Mar 12 13:21:47 crc kubenswrapper[4125]: I0312 13:21:47.379708 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:47Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:47 crc kubenswrapper[4125]: E0312 13:21:47.693750 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:47Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.381059 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:48Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.624600 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.625034 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.627293 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.628130 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.628392 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:21:48 crc kubenswrapper[4125]: I0312 13:21:48.631267 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5"
Mar 12 13:21:48 crc kubenswrapper[4125]: E0312 13:21:48.632426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:21:49 crc kubenswrapper[4125]: I0312 13:21:49.382692 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:49Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:50 crc kubenswrapper[4125]: E0312 13:21:50.211711 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:50Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:21:50 crc kubenswrapper[4125]: I0312 13:21:50.380053 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:50Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:50 crc kubenswrapper[4125]: I0312 13:21:50.425452 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:21:50 crc kubenswrapper[4125]: I0312 13:21:50.427631 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:21:50 crc kubenswrapper[4125]: I0312 13:21:50.427762 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:21:50 crc kubenswrapper[4125]: I0312 13:21:50.427799 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:21:50 crc kubenswrapper[4125]: I0312 13:21:50.427995 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:21:50 crc kubenswrapper[4125]: E0312 13:21:50.433130 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:50Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:21:51 crc kubenswrapper[4125]: I0312 13:21:51.382260 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:51Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:52 crc kubenswrapper[4125]: E0312 13:21:52.179288 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:21:52 crc kubenswrapper[4125]: I0312 13:21:52.383072 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:52Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:53 crc kubenswrapper[4125]: I0312 13:21:53.280091 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:21:53 crc kubenswrapper[4125]: I0312 13:21:53.280291 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:21:53 crc kubenswrapper[4125]: I0312 13:21:53.382540 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:53Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:54 crc kubenswrapper[4125]: I0312 13:21:54.383359 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:54Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:55 crc kubenswrapper[4125]: W0312 13:21:55.093094 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:55 crc kubenswrapper[4125]: E0312 13:21:55.093280 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:55 crc kubenswrapper[4125]: I0312 13:21:55.383254 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:55 crc kubenswrapper[4125]: W0312 13:21:55.644521 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:55 crc kubenswrapper[4125]: E0312 13:21:55.645597 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:56 crc kubenswrapper[4125]: I0312 13:21:56.383087 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:56Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:57 crc kubenswrapper[4125]: E0312 13:21:57.220295 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:57Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:21:57 crc kubenswrapper[4125]: I0312 13:21:57.383132 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:57Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:57 crc kubenswrapper[4125]: I0312 13:21:57.433801 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:21:57 crc kubenswrapper[4125]: I0312 13:21:57.436701 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:21:57 crc kubenswrapper[4125]: I0312 13:21:57.436997 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:21:57 crc kubenswrapper[4125]: I0312 13:21:57.437043 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:21:57 crc kubenswrapper[4125]: I0312 13:21:57.437095 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:21:57 crc kubenswrapper[4125]: E0312 13:21:57.446407 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:57Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:21:57 crc kubenswrapper[4125]: E0312 13:21:57.702247 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:57Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:21:58 crc kubenswrapper[4125]: I0312 13:21:58.382522 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:58Z is after 2025-06-26T12:47:18Z
Mar 12 13:21:59 crc kubenswrapper[4125]: I0312 13:21:59.384048 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:21:59Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:00 crc kubenswrapper[4125]: I0312 13:22:00.025420 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:00 crc kubenswrapper[4125]: I0312 13:22:00.029420 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:00 crc kubenswrapper[4125]: I0312 13:22:00.029581 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:00 crc kubenswrapper[4125]: I0312 13:22:00.029614 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:00 crc kubenswrapper[4125]: I0312 13:22:00.032316 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5"
Mar 12 13:22:00 crc kubenswrapper[4125]: E0312 13:22:00.033211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:22:00 crc kubenswrapper[4125]: I0312 13:22:00.382262 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:00Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:01 crc kubenswrapper[4125]: I0312 13:22:01.383317 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:01Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:02 crc kubenswrapper[4125]: E0312 13:22:02.179514 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:22:02 crc kubenswrapper[4125]: I0312 13:22:02.382076 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:02Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:03 crc kubenswrapper[4125]: W0312 13:22:03.211396 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:03Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:03 crc kubenswrapper[4125]: E0312 13:22:03.211545 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:03Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.279498 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.279655 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.279732 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.280089 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.282585 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.283108 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.283151 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.286558 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"0558473c01f993ce455f1c37f700d50b8eb36a3a54afcff7192c0669afaa68d4"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.287435 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://0558473c01f993ce455f1c37f700d50b8eb36a3a54afcff7192c0669afaa68d4" gracePeriod=30
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.384195 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:03Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.984072 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/2.log"
Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.985416 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/1.log"
"Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/1.log" Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.988893 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="0558473c01f993ce455f1c37f700d50b8eb36a3a54afcff7192c0669afaa68d4" exitCode=255 Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.988999 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"0558473c01f993ce455f1c37f700d50b8eb36a3a54afcff7192c0669afaa68d4"} Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.989051 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"923fd6af2adaa24fd45060a55803a82ab3b7ecd49bf83bb2d2af759943abd4f9"} Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.989085 4125 scope.go:117] "RemoveContainer" containerID="db362b1e057f0470cd5943bb3ee51a9a56041ade8bb719799d8d16e38edf3cc0" Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.989347 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.992158 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.992622 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:03 crc kubenswrapper[4125]: I0312 13:22:03.992660 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:04 crc kubenswrapper[4125]: E0312 13:22:04.234093 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:04Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.383125 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:04Z is after 2025-06-26T12:47:18Z Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.447658 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.450391 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.450494 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.450517 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.450553 4125 kubelet_node_status.go:77] "Attempting to register 
node" node="crc" Mar 12 13:22:04 crc kubenswrapper[4125]: E0312 13:22:04.458426 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:04Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:22:04 crc kubenswrapper[4125]: I0312 13:22:04.999465 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/2.log" Mar 12 13:22:05 crc kubenswrapper[4125]: I0312 13:22:05.394121 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:05Z is after 2025-06-26T12:47:18Z Mar 12 13:22:06 crc kubenswrapper[4125]: I0312 13:22:06.381668 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:06Z is after 2025-06-26T12:47:18Z Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.026441 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.029106 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.029203 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.029232 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.380188 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:07Z is after 2025-06-26T12:47:18Z Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.519802 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.520244 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.522712 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.522778 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:07 crc kubenswrapper[4125]: I0312 13:22:07.522929 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:07 crc kubenswrapper[4125]: E0312 13:22:07.709958 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:07Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:08 crc kubenswrapper[4125]: I0312 13:22:08.383183 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:08Z is after 2025-06-26T12:47:18Z Mar 12 13:22:09 crc kubenswrapper[4125]: I0312 13:22:09.386520 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:09Z is after 2025-06-26T12:47:18Z Mar 12 13:22:10 crc kubenswrapper[4125]: I0312 13:22:10.278350 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:22:10 crc kubenswrapper[4125]: I0312 13:22:10.278730 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:10 crc kubenswrapper[4125]: I0312 13:22:10.281326 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:10 crc kubenswrapper[4125]: I0312 13:22:10.281463 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:10 crc kubenswrapper[4125]: I0312 13:22:10.281497 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:10 crc kubenswrapper[4125]: I0312 13:22:10.384352 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:10Z is after 2025-06-26T12:47:18Z Mar 12 13:22:11 crc kubenswrapper[4125]: E0312 13:22:11.241289 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:11Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:22:11 crc kubenswrapper[4125]: I0312 13:22:11.386202 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:11Z is after 
Mar 12 13:22:11 crc kubenswrapper[4125]: I0312 13:22:11.459523 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:11 crc kubenswrapper[4125]: I0312 13:22:11.462930 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:11 crc kubenswrapper[4125]: I0312 13:22:11.463343 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:11 crc kubenswrapper[4125]: I0312 13:22:11.463550 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:11 crc kubenswrapper[4125]: I0312 13:22:11.463748 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:22:11 crc kubenswrapper[4125]: E0312 13:22:11.476454 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:11Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:22:12 crc kubenswrapper[4125]: I0312 13:22:12.026236 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:12 crc kubenswrapper[4125]: I0312 13:22:12.028592 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:12 crc kubenswrapper[4125]: I0312 13:22:12.028693 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:12 crc kubenswrapper[4125]: I0312 13:22:12.028725 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:12 crc kubenswrapper[4125]: I0312 13:22:12.032064 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5"
Mar 12 13:22:12 crc kubenswrapper[4125]: E0312 13:22:12.032994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:22:12 crc kubenswrapper[4125]: E0312 13:22:12.180056 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:22:12 crc kubenswrapper[4125]: I0312 13:22:12.380297 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:12Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:13 crc kubenswrapper[4125]: I0312 13:22:13.278531 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:22:13 crc kubenswrapper[4125]: I0312 13:22:13.278753 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:22:13 crc kubenswrapper[4125]: I0312 13:22:13.379911 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:13Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:14 crc kubenswrapper[4125]: W0312 13:22:14.350525 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:14Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:14 crc kubenswrapper[4125]: E0312 13:22:14.350707 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:14Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:14 crc kubenswrapper[4125]: I0312 13:22:14.382138 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:14Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:15 crc kubenswrapper[4125]: I0312 13:22:15.381689 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:15Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:15 crc kubenswrapper[4125]: I0312 13:22:15.531928 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Mar 12 13:22:15 crc kubenswrapper[4125]: E0312 13:22:15.540386 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:15Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:16 crc kubenswrapper[4125]: I0312 13:22:16.382996 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:16Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:17 crc kubenswrapper[4125]: I0312 13:22:17.383199 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:17Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:17 crc kubenswrapper[4125]: E0312 13:22:17.719173 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:17Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:22:18 crc kubenswrapper[4125]: E0312 13:22:18.248702 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:18Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:22:18 crc kubenswrapper[4125]: I0312 13:22:18.381975 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:18Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:18 crc kubenswrapper[4125]: I0312 13:22:18.478153 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:18 crc kubenswrapper[4125]: I0312 13:22:18.480970 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:18 crc kubenswrapper[4125]: I0312 13:22:18.481188 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:18 crc kubenswrapper[4125]: I0312 13:22:18.481246 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:18 crc kubenswrapper[4125]: I0312 13:22:18.481314 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:22:18 crc kubenswrapper[4125]: E0312 13:22:18.492789 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:18Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:22:19 crc kubenswrapper[4125]: I0312 13:22:19.382788 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:19Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:20 crc kubenswrapper[4125]: I0312 13:22:20.383550 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:20Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:21 crc kubenswrapper[4125]: I0312 13:22:21.026375 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:21 crc kubenswrapper[4125]: I0312 13:22:21.028775 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:21 crc kubenswrapper[4125]: I0312 13:22:21.029319 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:21 crc kubenswrapper[4125]: I0312 13:22:21.029677 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:21 crc kubenswrapper[4125]: I0312 13:22:21.381987 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:21Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:22 crc kubenswrapper[4125]: E0312 13:22:22.180640 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:22:22 crc kubenswrapper[4125]: I0312 13:22:22.382216 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:22Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:23 crc kubenswrapper[4125]: I0312 13:22:23.279729 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:22:23 crc kubenswrapper[4125]: I0312 13:22:23.281122 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:22:23 crc kubenswrapper[4125]: I0312 13:22:23.381318 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:23Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:24 crc kubenswrapper[4125]: I0312 13:22:24.382929 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:24Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:25 crc kubenswrapper[4125]: E0312 13:22:25.257527 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:25Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:22:25 crc kubenswrapper[4125]: I0312 13:22:25.381607 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:25Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:25 crc kubenswrapper[4125]: I0312 13:22:25.493675 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:25 crc kubenswrapper[4125]: I0312 13:22:25.496361 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:25 crc kubenswrapper[4125]: I0312 13:22:25.496473 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:25 crc kubenswrapper[4125]: I0312 13:22:25.496504 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:25 crc kubenswrapper[4125]: I0312 13:22:25.496551 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:22:25 crc kubenswrapper[4125]: E0312 13:22:25.501637 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:25Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:22:26 crc kubenswrapper[4125]: I0312 13:22:26.381938 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:26Z is after 2025-06-26T12:47:18Z
Mar 12 13:22:27 crc kubenswrapper[4125]: I0312 13:22:27.026204 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:22:27 crc kubenswrapper[4125]: I0312 13:22:27.028931 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:22:27 crc kubenswrapper[4125]: I0312 13:22:27.028992 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:22:27 crc kubenswrapper[4125]: I0312 13:22:27.029009 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:22:27 crc kubenswrapper[4125]: I0312 13:22:27.030501 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5"
Mar 12 13:22:27 crc kubenswrapper[4125]: I0312 13:22:27.380540 4125 csi_plugin.go:880] Failed to contact API server
when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:27Z is after 2025-06-26T12:47:18Z Mar 12 13:22:27 crc kubenswrapper[4125]: E0312 13:22:27.727757 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:27Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:27 crc kubenswrapper[4125]: E0312 13:22:27.728028 4125 event.go:294] "Unable to write event (retry limit exceeded!)" event="&Event{ObjectMeta:{crc.189c1aa0d66b7a8a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,LastTimestamp:2026-03-12 13:20:31.371590282 +0000 UTC m=+1.694977331,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:27 crc kubenswrapper[4125]: E0312 13:22:27.735165 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:27Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 13:22:28.121701 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/3.log" Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 13:22:28.127804 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4"} Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 
13:22:28.128788 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 13:22:28.131544 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 13:22:28.131674 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 13:22:28.131707 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:28 crc kubenswrapper[4125]: I0312 13:22:28.382531 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:28Z is after 2025-06-26T12:47:18Z Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.137562 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/4.log" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.139282 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/3.log" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.144686 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" exitCode=255 Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.144792 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4"} Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.145007 4125 scope.go:117] "RemoveContainer" containerID="92fad5df074c4ce035edad43f9966644cf8f710ece6bfdaa4545765c0c8131a5" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.145331 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.148001 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.148328 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.148410 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.151524 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" Mar 12 13:22:29 crc kubenswrapper[4125]: E0312 13:22:29.152472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:22:29 crc kubenswrapper[4125]: I0312 13:22:29.380766 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:29Z is after 2025-06-26T12:47:18Z Mar 12 13:22:30 crc kubenswrapper[4125]: I0312 13:22:30.025706 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:30 crc kubenswrapper[4125]: I0312 13:22:30.028740 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:30 crc kubenswrapper[4125]: I0312 13:22:30.029050 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:30 crc kubenswrapper[4125]: I0312 13:22:30.029071 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:30 crc kubenswrapper[4125]: I0312 13:22:30.152643 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/4.log" Mar 12 13:22:30 crc kubenswrapper[4125]: I0312 13:22:30.381739 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:30Z is after 2025-06-26T12:47:18Z Mar 12 13:22:30 crc kubenswrapper[4125]: W0312 13:22:30.893802 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:30Z is after 2025-06-26T12:47:18Z Mar 12 13:22:30 crc kubenswrapper[4125]: E0312 13:22:30.894059 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:30Z is after 2025-06-26T12:47:18Z Mar 12 13:22:31 crc kubenswrapper[4125]: I0312 13:22:31.381509 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:31Z is after 2025-06-26T12:47:18Z Mar 12 13:22:31 crc kubenswrapper[4125]: I0312 13:22:31.409434 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:22:31 crc kubenswrapper[4125]: I0312 13:22:31.409539 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:22:31 crc kubenswrapper[4125]: I0312 13:22:31.409567 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:22:31 
crc kubenswrapper[4125]: I0312 13:22:31.409619 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:22:31 crc kubenswrapper[4125]: I0312 13:22:31.409653 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:22:32 crc kubenswrapper[4125]: E0312 13:22:32.182245 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:22:32 crc kubenswrapper[4125]: E0312 13:22:32.262265 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:32Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:22:32 crc kubenswrapper[4125]: I0312 13:22:32.382702 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:32Z is after 2025-06-26T12:47:18Z Mar 12 13:22:32 crc kubenswrapper[4125]: I0312 13:22:32.502891 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:32 crc kubenswrapper[4125]: I0312 13:22:32.505796 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:32 crc kubenswrapper[4125]: I0312 13:22:32.506038 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:32 crc kubenswrapper[4125]: I0312 13:22:32.506071 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:32 crc kubenswrapper[4125]: I0312 13:22:32.506310 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:22:32 crc kubenswrapper[4125]: E0312 13:22:32.514008 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:32Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.278966 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.279161 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.279248 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.279496 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.282700 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.282946 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.282983 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.287004 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"923fd6af2adaa24fd45060a55803a82ab3b7ecd49bf83bb2d2af759943abd4f9"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.287781 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://923fd6af2adaa24fd45060a55803a82ab3b7ecd49bf83bb2d2af759943abd4f9" gracePeriod=30 Mar 12 13:22:33 crc kubenswrapper[4125]: I0312 13:22:33.381691 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:33Z is after 2025-06-26T12:47:18Z Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.185188 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.185203 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/3.log" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.185402 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.187614 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.187675 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.187698 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.187971 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/2.log" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.189240 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" Mar 12 13:22:34 crc 
kubenswrapper[4125]: E0312 13:22:34.189627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.191554 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="923fd6af2adaa24fd45060a55803a82ab3b7ecd49bf83bb2d2af759943abd4f9" exitCode=255 Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.191663 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"923fd6af2adaa24fd45060a55803a82ab3b7ecd49bf83bb2d2af759943abd4f9"} Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.191772 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3"} Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.192002 4125 scope.go:117] "RemoveContainer" containerID="0558473c01f993ce455f1c37f700d50b8eb36a3a54afcff7192c0669afaa68d4" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.192252 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.194004 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.194198 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.194237 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:34 crc kubenswrapper[4125]: I0312 13:22:34.381687 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:34Z is after 2025-06-26T12:47:18Z Mar 12 13:22:35 crc kubenswrapper[4125]: I0312 13:22:35.200440 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/3.log" Mar 12 13:22:35 crc kubenswrapper[4125]: I0312 13:22:35.382329 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:35Z is after 2025-06-26T12:47:18Z Mar 12 13:22:35 crc kubenswrapper[4125]: E0312 13:22:35.639405 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:22:35Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:36 crc kubenswrapper[4125]: I0312 13:22:36.383751 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:36Z is after 2025-06-26T12:47:18Z Mar 12 13:22:37 crc kubenswrapper[4125]: I0312 13:22:37.382575 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:37Z is after 2025-06-26T12:47:18Z Mar 12 13:22:37 crc kubenswrapper[4125]: I0312 13:22:37.519752 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:22:37 crc kubenswrapper[4125]: I0312 13:22:37.520277 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:37 crc kubenswrapper[4125]: I0312 13:22:37.522669 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:37 crc kubenswrapper[4125]: I0312 13:22:37.522898 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:37 crc kubenswrapper[4125]: I0312 13:22:37.522940 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.382058 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:38Z is after 2025-06-26T12:47:18Z Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.625286 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.625717 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.628574 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.628716 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.628753 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Mar 12 13:22:38 crc kubenswrapper[4125]: I0312 13:22:38.631298 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" Mar 12 13:22:38 crc kubenswrapper[4125]: E0312 13:22:38.632265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:22:38 crc kubenswrapper[4125]: W0312 13:22:38.688422 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:38Z is after 2025-06-26T12:47:18Z Mar 12 13:22:38 crc kubenswrapper[4125]: E0312 13:22:38.688562 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:38Z is after 2025-06-26T12:47:18Z Mar 12 13:22:39 crc kubenswrapper[4125]: E0312 13:22:39.269960 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:39Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:22:39 crc kubenswrapper[4125]: I0312 13:22:39.382688 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:39Z is after 2025-06-26T12:47:18Z Mar 12 13:22:39 crc kubenswrapper[4125]: I0312 13:22:39.514955 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:39 crc kubenswrapper[4125]: I0312 13:22:39.517434 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:39 crc kubenswrapper[4125]: I0312 13:22:39.517553 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:39 crc kubenswrapper[4125]: I0312 13:22:39.517588 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:39 crc kubenswrapper[4125]: I0312 13:22:39.517642 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:22:39 crc kubenswrapper[4125]: E0312 13:22:39.525399 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:39Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:22:40 crc kubenswrapper[4125]: I0312 13:22:40.278375 
4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:22:40 crc kubenswrapper[4125]: I0312 13:22:40.278658 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:40 crc kubenswrapper[4125]: I0312 13:22:40.280954 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:40 crc kubenswrapper[4125]: I0312 13:22:40.281212 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:40 crc kubenswrapper[4125]: I0312 13:22:40.281249 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:40 crc kubenswrapper[4125]: I0312 13:22:40.382275 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:40Z is after 2025-06-26T12:47:18Z Mar 12 13:22:41 crc kubenswrapper[4125]: I0312 13:22:41.382462 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:41Z is after 2025-06-26T12:47:18Z Mar 12 13:22:42 crc kubenswrapper[4125]: E0312 13:22:42.183165 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:22:42 crc kubenswrapper[4125]: I0312 13:22:42.382274 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:42Z is after 2025-06-26T12:47:18Z Mar 12 13:22:43 crc kubenswrapper[4125]: I0312 13:22:43.279179 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:22:43 crc kubenswrapper[4125]: I0312 13:22:43.279396 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:22:43 crc kubenswrapper[4125]: I0312 13:22:43.382286 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:43Z is after 2025-06-26T12:47:18Z Mar 12 13:22:44 crc kubenswrapper[4125]: I0312 13:22:44.386384 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode 
publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:44Z is after 2025-06-26T12:47:18Z Mar 12 13:22:45 crc kubenswrapper[4125]: I0312 13:22:45.388384 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:45Z is after 2025-06-26T12:47:18Z Mar 12 13:22:45 crc kubenswrapper[4125]: E0312 13:22:45.648550 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:45Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:46 crc kubenswrapper[4125]: E0312 13:22:46.278981 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:46Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:22:46 crc kubenswrapper[4125]: I0312 13:22:46.383720 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:46Z is after 2025-06-26T12:47:18Z Mar 12 13:22:46 crc kubenswrapper[4125]: I0312 13:22:46.526426 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:46 crc kubenswrapper[4125]: I0312 13:22:46.529671 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:46 crc kubenswrapper[4125]: I0312 13:22:46.529930 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:46 crc kubenswrapper[4125]: I0312 13:22:46.529992 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:46 crc kubenswrapper[4125]: I0312 13:22:46.530046 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:22:46 crc kubenswrapper[4125]: E0312 13:22:46.538317 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:46Z 
is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:22:47 crc kubenswrapper[4125]: I0312 13:22:47.382903 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:47Z is after 2025-06-26T12:47:18Z Mar 12 13:22:47 crc kubenswrapper[4125]: I0312 13:22:47.532196 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:22:47 crc kubenswrapper[4125]: E0312 13:22:47.540625 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:47Z is after 2025-06-26T12:47:18Z Mar 12 13:22:48 crc kubenswrapper[4125]: I0312 13:22:48.383701 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:48Z is after 2025-06-26T12:47:18Z Mar 12 13:22:49 crc kubenswrapper[4125]: I0312 13:22:49.381763 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:49Z is after 2025-06-26T12:47:18Z Mar 12 13:22:50 crc kubenswrapper[4125]: I0312 13:22:50.383204 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:50Z is after 2025-06-26T12:47:18Z Mar 12 13:22:51 crc kubenswrapper[4125]: I0312 13:22:51.383020 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:51Z is after 2025-06-26T12:47:18Z Mar 12 13:22:52 crc kubenswrapper[4125]: E0312 13:22:52.184560 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:22:52 crc kubenswrapper[4125]: I0312 13:22:52.384203 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:52Z is after 2025-06-26T12:47:18Z Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.025563 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.028395 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:53 crc kubenswrapper[4125]: 
I0312 13:22:53.028506 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.028539 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.031447 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" Mar 12 13:22:53 crc kubenswrapper[4125]: E0312 13:22:53.032329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:22:53 crc kubenswrapper[4125]: W0312 13:22:53.069991 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:53Z is after 2025-06-26T12:47:18Z Mar 12 13:22:53 crc kubenswrapper[4125]: E0312 13:22:53.070276 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:53Z is after 2025-06-26T12:47:18Z Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.279956 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.281443 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 12 13:22:53 crc kubenswrapper[4125]: E0312 13:22:53.290446 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:53Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.382753 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:53Z is after 2025-06-26T12:47:18Z Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.538935 4125 kubelet_node_status.go:402] "Setting 
node annotation to enable volume controller attach/detach" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.541641 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.541940 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.542000 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:22:53 crc kubenswrapper[4125]: I0312 13:22:53.542127 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:22:53 crc kubenswrapper[4125]: E0312 13:22:53.550197 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:53Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:22:54 crc kubenswrapper[4125]: I0312 13:22:54.382572 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:54Z is after 2025-06-26T12:47:18Z Mar 12 13:22:55 crc kubenswrapper[4125]: I0312 13:22:55.382501 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:55Z is after 2025-06-26T12:47:18Z Mar 12 13:22:55 crc kubenswrapper[4125]: E0312 13:22:55.656643 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:55Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:22:56 crc kubenswrapper[4125]: I0312 13:22:56.382150 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:56Z is after 2025-06-26T12:47:18Z Mar 12 13:22:57 crc kubenswrapper[4125]: I0312 13:22:57.383309 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:22:57Z is after 2025-06-26T12:47:18Z Mar 12 13:22:58 crc kubenswrapper[4125]: I0312 13:22:58.382730 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:58Z is after 2025-06-26T12:47:18Z Mar 12 13:22:59 crc kubenswrapper[4125]: I0312 13:22:59.381265 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:22:59Z is after 2025-06-26T12:47:18Z Mar 12 13:23:00 crc kubenswrapper[4125]: E0312 13:23:00.300756 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:00Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:23:00 crc kubenswrapper[4125]: I0312 13:23:00.383141 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:00Z is after 2025-06-26T12:47:18Z Mar 12 13:23:00 crc kubenswrapper[4125]: I0312 13:23:00.550694 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:00 crc kubenswrapper[4125]: I0312 13:23:00.553462 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:00 crc kubenswrapper[4125]: I0312 13:23:00.553551 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:00 crc kubenswrapper[4125]: I0312 13:23:00.553581 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:00 crc kubenswrapper[4125]: I0312 13:23:00.553628 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:23:00 crc kubenswrapper[4125]: E0312 13:23:00.562460 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:00Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:23:01 crc kubenswrapper[4125]: I0312 13:23:01.381603 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:01Z is after 2025-06-26T12:47:18Z Mar 12 13:23:02 crc kubenswrapper[4125]: E0312 13:23:02.185369 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:23:02 crc kubenswrapper[4125]: I0312 13:23:02.381746 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:02Z is after 2025-06-26T12:47:18Z Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.280582 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.280802 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.286988 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.287724 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.291260 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.291466 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.291628 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.294759 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.296234 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3" gracePeriod=30 Mar 12 13:23:03 crc kubenswrapper[4125]: I0312 13:23:03.381725 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:03Z is after 2025-06-26T12:47:18Z Mar 12 13:23:03 crc kubenswrapper[4125]: E0312 13:23:03.395147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller 
pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.025355 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.028244 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.028491 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.028671 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.031273 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" Mar 12 13:23:04 crc kubenswrapper[4125]: E0312 13:23:04.032309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.342301 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/4.log" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.344223 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/3.log" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.347176 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3" exitCode=255 Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.347290 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3"} Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.347359 4125 scope.go:117] "RemoveContainer" containerID="923fd6af2adaa24fd45060a55803a82ab3b7ecd49bf83bb2d2af759943abd4f9" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.347642 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.353040 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.353131 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.353161 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 
13:23:04.356394 4125 scope.go:117] "RemoveContainer" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3" Mar 12 13:23:04 crc kubenswrapper[4125]: E0312 13:23:04.357960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:23:04 crc kubenswrapper[4125]: I0312 13:23:04.384510 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:04Z is after 2025-06-26T12:47:18Z Mar 12 13:23:05 crc kubenswrapper[4125]: I0312 13:23:05.355550 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/4.log" Mar 12 13:23:05 crc kubenswrapper[4125]: I0312 13:23:05.381550 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:05Z is after 2025-06-26T12:47:18Z Mar 12 13:23:05 crc kubenswrapper[4125]: E0312 13:23:05.665310 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:05Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:23:06 crc kubenswrapper[4125]: I0312 13:23:06.382973 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:06Z is after 2025-06-26T12:47:18Z Mar 12 13:23:07 crc kubenswrapper[4125]: E0312 13:23:07.308267 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:07Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:23:07 crc kubenswrapper[4125]: I0312 13:23:07.382053 4125 csi_plugin.go:880] Failed to contact API 
server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:07Z is after 2025-06-26T12:47:18Z Mar 12 13:23:07 crc kubenswrapper[4125]: I0312 13:23:07.563537 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:07 crc kubenswrapper[4125]: I0312 13:23:07.566131 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:07 crc kubenswrapper[4125]: I0312 13:23:07.566297 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:07 crc kubenswrapper[4125]: I0312 13:23:07.566345 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:07 crc kubenswrapper[4125]: I0312 13:23:07.566409 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:23:07 crc kubenswrapper[4125]: E0312 13:23:07.575435 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:07Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:23:08 crc kubenswrapper[4125]: I0312 13:23:08.382408 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:08Z is after 2025-06-26T12:47:18Z Mar 12 13:23:09 crc kubenswrapper[4125]: W0312 13:23:09.276642 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:09Z is after 2025-06-26T12:47:18Z Mar 12 13:23:09 crc kubenswrapper[4125]: E0312 13:23:09.276794 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:09Z is after 2025-06-26T12:47:18Z Mar 12 13:23:09 crc kubenswrapper[4125]: I0312 13:23:09.383203 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:09Z is after 2025-06-26T12:47:18Z Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.385543 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:10Z is after 2025-06-26T12:47:18Z Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.473307 4125 kubelet.go:2533] 
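[annotation] Every API call in this stretch fails the same way before a request is ever sent: the node's clock reads 2026-03-12 while the chain presented by api-int.crc.testing expired on 2025-06-26, so Go's TLS verifier rejects it. A minimal sketch of that validity-window check, assuming a PEM-encoded certificate on disk (the path below is hypothetical; point it at whichever certificate is under suspicion):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; substitute the certificate the failing endpoint serves.
        data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-client-current.pem")
        if err != nil {
            panic(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            panic("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            panic(err)
        }
        // crypto/x509 rejects any chain whose certificates do not satisfy
        // NotBefore <= now <= NotAfter; that is the "certificate has expired
        // or is not yet valid" error repeated throughout this log.
        now := time.Now()
        if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
            fmt.Printf("invalid: current time %s is outside [%s, %s]\n",
                now.UTC().Format(time.RFC3339),
                cert.NotBefore.Format(time.RFC3339),
                cert.NotAfter.Format(time.RFC3339))
            return
        }
        fmt.Println("certificate is within its validity window")
    }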
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.473590 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.476034 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.476156 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.476189 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:10 crc kubenswrapper[4125]: I0312 13:23:10.479410 4125 scope.go:117] "RemoveContainer" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3" Mar 12 13:23:10 crc kubenswrapper[4125]: E0312 13:23:10.481026 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:23:11 crc kubenswrapper[4125]: I0312 13:23:11.382910 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:11Z is after 2025-06-26T12:47:18Z Mar 12 13:23:12 crc kubenswrapper[4125]: E0312 13:23:12.186641 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:23:12 crc kubenswrapper[4125]: I0312 13:23:12.381227 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:12Z is after 2025-06-26T12:47:18Z Mar 12 13:23:13 crc kubenswrapper[4125]: I0312 13:23:13.381265 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:13Z is after 2025-06-26T12:47:18Z Mar 12 13:23:14 crc kubenswrapper[4125]: E0312 13:23:14.316348 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:14Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:23:14 crc kubenswrapper[4125]: I0312 13:23:14.382417 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2026-03-12T13:23:14Z is after 2025-06-26T12:47:18Z Mar 12 13:23:14 crc kubenswrapper[4125]: I0312 13:23:14.576756 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:14 crc kubenswrapper[4125]: I0312 13:23:14.579490 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:14 crc kubenswrapper[4125]: I0312 13:23:14.579613 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:14 crc kubenswrapper[4125]: I0312 13:23:14.579637 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:14 crc kubenswrapper[4125]: I0312 13:23:14.579678 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:23:14 crc kubenswrapper[4125]: E0312 13:23:14.587284 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:14Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:23:15 crc kubenswrapper[4125]: I0312 13:23:15.382696 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:15Z is after 2025-06-26T12:47:18Z Mar 12 13:23:15 crc kubenswrapper[4125]: E0312 13:23:15.675895 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:15Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:23:16 crc kubenswrapper[4125]: I0312 13:23:16.025806 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:16 crc kubenswrapper[4125]: I0312 13:23:16.027899 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:16 crc kubenswrapper[4125]: I0312 13:23:16.028063 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:16 crc kubenswrapper[4125]: I0312 13:23:16.028097 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:16 crc kubenswrapper[4125]: I0312 13:23:16.030524 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4" Mar 12 13:23:16 crc kubenswrapper[4125]: E0312 13:23:16.031390 4125 
pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:23:16 crc kubenswrapper[4125]: I0312 13:23:16.385418 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:16Z is after 2025-06-26T12:47:18Z Mar 12 13:23:17 crc kubenswrapper[4125]: I0312 13:23:17.382735 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:17Z is after 2025-06-26T12:47:18Z Mar 12 13:23:18 crc kubenswrapper[4125]: I0312 13:23:18.383555 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:18Z is after 2025-06-26T12:47:18Z Mar 12 13:23:19 crc kubenswrapper[4125]: I0312 13:23:19.381687 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:19Z is after 2025-06-26T12:47:18Z Mar 12 13:23:19 crc kubenswrapper[4125]: I0312 13:23:19.531173 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:23:19 crc kubenswrapper[4125]: E0312 13:23:19.539456 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:19Z is after 2025-06-26T12:47:18Z Mar 12 13:23:20 crc kubenswrapper[4125]: I0312 13:23:20.382488 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:20Z is after 2025-06-26T12:47:18Z Mar 12 13:23:21 crc kubenswrapper[4125]: E0312 13:23:21.326486 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:21Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:23:21 crc kubenswrapper[4125]: I0312 13:23:21.383005 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:21Z is after 2025-06-26T12:47:18Z Mar 12 13:23:21 crc kubenswrapper[4125]: I0312 13:23:21.588510 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:23:21 crc kubenswrapper[4125]: I0312 13:23:21.591167 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:23:21 crc kubenswrapper[4125]: I0312 13:23:21.591313 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:23:21 crc kubenswrapper[4125]: I0312 13:23:21.591349 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:23:21 crc kubenswrapper[4125]: I0312 13:23:21.591417 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:23:21 crc kubenswrapper[4125]: E0312 13:23:21.600167 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:21Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:23:22 crc kubenswrapper[4125]: E0312 13:23:22.187980 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:23:22 crc kubenswrapper[4125]: I0312 13:23:22.384224 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:22Z is after 2025-06-26T12:47:18Z Mar 12 13:23:23 crc kubenswrapper[4125]: I0312 13:23:23.380743 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:23Z is after 2025-06-26T12:47:18Z Mar 12 13:23:24 crc kubenswrapper[4125]: I0312 13:23:24.383452 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:24Z is after 2025-06-26T12:47:18Z Mar 12 13:23:24 crc kubenswrapper[4125]: W0312 13:23:24.861188 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:24Z is after 2025-06-26T12:47:18Z Mar 12 13:23:24 crc kubenswrapper[4125]: E0312 13:23:24.861355 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:24Z is after 
Mar 12 13:23:25 crc kubenswrapper[4125]: I0312 13:23:25.025399 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:25 crc kubenswrapper[4125]: I0312 13:23:25.028197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:25 crc kubenswrapper[4125]: I0312 13:23:25.028313 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:25 crc kubenswrapper[4125]: I0312 13:23:25.028354 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:25 crc kubenswrapper[4125]: I0312 13:23:25.031635 4125 scope.go:117] "RemoveContainer" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3"
Mar 12 13:23:25 crc kubenswrapper[4125]: E0312 13:23:25.033250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:23:25 crc kubenswrapper[4125]: I0312 13:23:25.381044 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:25Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:25 crc kubenswrapper[4125]: E0312 13:23:25.685409 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:25Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:23:26 crc kubenswrapper[4125]: I0312 13:23:26.383084 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:26Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:26 crc kubenswrapper[4125]: W0312 13:23:26.945455 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:26Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:26 crc kubenswrapper[4125]: E0312 13:23:26.945604 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:26Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:27 crc kubenswrapper[4125]: I0312 13:23:27.384715 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:27Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:27 crc kubenswrapper[4125]: W0312 13:23:27.461451 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:27Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:27 crc kubenswrapper[4125]: E0312 13:23:27.462001 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:27Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:28 crc kubenswrapper[4125]: E0312 13:23:28.334513 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:28Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:23:28 crc kubenswrapper[4125]: I0312 13:23:28.381403 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:28Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:28 crc kubenswrapper[4125]: I0312 13:23:28.600511 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:28 crc kubenswrapper[4125]: I0312 13:23:28.603574 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:28 crc kubenswrapper[4125]: I0312 13:23:28.603735 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:28 crc kubenswrapper[4125]: I0312 13:23:28.603771 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:28 crc kubenswrapper[4125]: I0312 13:23:28.603992 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:23:28 crc kubenswrapper[4125]: E0312 13:23:28.612330 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:28Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:23:29 crc kubenswrapper[4125]: I0312 13:23:29.382582 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:29Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.025505 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.025621 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.028503 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.028605 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.028635 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.030056 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.030368 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.030566 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.031157 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4"
Mar 12 13:23:30 crc kubenswrapper[4125]: E0312 13:23:30.032153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:23:30 crc kubenswrapper[4125]: I0312 13:23:30.382440 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:30Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:31 crc kubenswrapper[4125]: I0312 13:23:31.382205 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:31Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:31 crc kubenswrapper[4125]: I0312 13:23:31.409803 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:23:31 crc kubenswrapper[4125]: I0312 13:23:31.410114 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:23:31 crc kubenswrapper[4125]: I0312 13:23:31.410156 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:23:31 crc kubenswrapper[4125]: I0312 13:23:31.410184 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:23:31 crc kubenswrapper[4125]: I0312 13:23:31.410225 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:23:32 crc kubenswrapper[4125]: E0312 13:23:32.188578 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:23:32 crc kubenswrapper[4125]: I0312 13:23:32.381987 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:32Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:33 crc kubenswrapper[4125]: I0312 13:23:33.024719 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:33 crc kubenswrapper[4125]: I0312 13:23:33.026519 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:33 crc kubenswrapper[4125]: I0312 13:23:33.026629 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:33 crc kubenswrapper[4125]: I0312 13:23:33.026654 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:33 crc kubenswrapper[4125]: I0312 13:23:33.382195 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:33Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:34 crc kubenswrapper[4125]: I0312 13:23:34.383351 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:34Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:35 crc kubenswrapper[4125]: E0312 13:23:35.341266 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:35Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:23:35 crc kubenswrapper[4125]: I0312 13:23:35.382273 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:35Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:35 crc kubenswrapper[4125]: I0312 13:23:35.613372 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:35 crc kubenswrapper[4125]: I0312 13:23:35.618228 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:35 crc kubenswrapper[4125]: I0312 13:23:35.618380 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:35 crc kubenswrapper[4125]: I0312 13:23:35.618415 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:35 crc kubenswrapper[4125]: I0312 13:23:35.618474 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:23:35 crc kubenswrapper[4125]: E0312 13:23:35.631212 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:35Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:23:35 crc kubenswrapper[4125]: E0312 13:23:35.694760 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:35Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:23:36 crc kubenswrapper[4125]: I0312 13:23:36.026051 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:36 crc kubenswrapper[4125]: I0312 13:23:36.028728 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:36 crc kubenswrapper[4125]: I0312 13:23:36.028986 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:36 crc kubenswrapper[4125]: I0312 13:23:36.029028 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:36 crc kubenswrapper[4125]: I0312 13:23:36.032009 4125 scope.go:117] "RemoveContainer" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3"
Mar 12 13:23:36 crc kubenswrapper[4125]: E0312 13:23:36.033619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:23:36 crc kubenswrapper[4125]: I0312 13:23:36.381792 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:36Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:37 crc kubenswrapper[4125]: I0312 13:23:37.384970 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:37Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:38 crc kubenswrapper[4125]: I0312 13:23:38.385407 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:38Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:39 crc kubenswrapper[4125]: I0312 13:23:39.382489 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:39Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:40 crc kubenswrapper[4125]: I0312 13:23:40.380442 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:40Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:41 crc kubenswrapper[4125]: I0312 13:23:41.383518 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:41Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.025633 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.028441 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.028615 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.028670 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.032152 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4"
Mar 12 13:23:42 crc kubenswrapper[4125]: E0312 13:23:42.033208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:23:42 crc kubenswrapper[4125]: E0312 13:23:42.189541 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.389277 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:42Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:42 crc kubenswrapper[4125]: E0312 13:23:42.391746 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:42Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.632432 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.636410 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.636524 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.636559 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:42 crc kubenswrapper[4125]: I0312 13:23:42.636611 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:23:42 crc kubenswrapper[4125]: E0312 13:23:42.644381 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:42Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:23:43 crc kubenswrapper[4125]: I0312 13:23:43.381960 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:43Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:44 crc kubenswrapper[4125]: I0312 13:23:44.379459 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:44Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:45 crc kubenswrapper[4125]: I0312 13:23:45.380732 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:45Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:45 crc kubenswrapper[4125]: E0312 13:23:45.703984 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:45Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:23:46 crc kubenswrapper[4125]: I0312 13:23:46.381681 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:46Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:47 crc kubenswrapper[4125]: I0312 13:23:47.380774 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:47Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:48 crc kubenswrapper[4125]: I0312 13:23:48.379320 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:48Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:49 crc kubenswrapper[4125]: I0312 13:23:49.382607 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:49Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:49 crc kubenswrapper[4125]: E0312 13:23:49.399700 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:49Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:23:49 crc kubenswrapper[4125]: I0312 13:23:49.645526 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:49 crc kubenswrapper[4125]: I0312 13:23:49.647109 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:49 crc kubenswrapper[4125]: I0312 13:23:49.647194 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:49 crc kubenswrapper[4125]: I0312 13:23:49.647211 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:49 crc kubenswrapper[4125]: I0312 13:23:49.647244 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:23:49 crc kubenswrapper[4125]: E0312 13:23:49.653338 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:49Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:23:50 crc kubenswrapper[4125]: I0312 13:23:50.384322 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:50Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.025228 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.031301 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.031735 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.032196 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.035766 4125 scope.go:117] "RemoveContainer" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.379756 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:51Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.532008 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Mar 12 13:23:51 crc kubenswrapper[4125]: E0312 13:23:51.538063 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:51Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.558688 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/4.log"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.563509 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"}
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.563793 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.565443 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.565573 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:51 crc kubenswrapper[4125]: I0312 13:23:51.565607 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:52 crc kubenswrapper[4125]: E0312 13:23:52.190529 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:23:52 crc kubenswrapper[4125]: I0312 13:23:52.382238 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:52Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:53 crc kubenswrapper[4125]: I0312 13:23:53.382468 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:53Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.025501 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.027569 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.027692 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.027725 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.030719 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.378480 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:54Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.582681 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/4.log"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.587551 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"}
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.587937 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.589661 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.589990 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:54 crc kubenswrapper[4125]: I0312 13:23:54.590096 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:55 crc kubenswrapper[4125]: W0312 13:23:55.212566 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:55 crc kubenswrapper[4125]: E0312 13:23:55.212652 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.380306 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.596125 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/5.log"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.597458 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/4.log"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.602695 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" exitCode=255
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.602797 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"}
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.602976 4125 scope.go:117] "RemoveContainer" containerID="149c49bb0f1958095a72dc4281a1b3262ce1e34eb6923b6eb3c0d9f3c862f8c4"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.603164 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.604690 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.604925 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.605038 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:55 crc kubenswrapper[4125]: I0312 13:23:55.607196 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:23:55 crc kubenswrapper[4125]: E0312 13:23:55.608204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:23:55 crc kubenswrapper[4125]: E0312 13:23:55.711398 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:55Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.383283 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:56Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:56 crc kubenswrapper[4125]: E0312 13:23:56.408108 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:56Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.609346 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/5.log"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.653654 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.655644 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.655743 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.655777 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:56 crc kubenswrapper[4125]: I0312 13:23:56.655928 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:23:56 crc kubenswrapper[4125]: E0312 13:23:56.660427 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:56Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:23:57 crc kubenswrapper[4125]: I0312 13:23:57.384405 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:57Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:57 crc kubenswrapper[4125]: I0312 13:23:57.520446 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:23:57 crc kubenswrapper[4125]: I0312 13:23:57.520725 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:57 crc kubenswrapper[4125]: I0312 13:23:57.526104 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:57 crc kubenswrapper[4125]: I0312 13:23:57.526251 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:57 crc kubenswrapper[4125]: I0312 13:23:57.526287 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.383971 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:58Z is after 2025-06-26T12:47:18Z
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.625087 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.625363 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.627247 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.627393 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.627431 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:23:58 crc kubenswrapper[4125]: I0312 13:23:58.630096 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:23:58 crc kubenswrapper[4125]: E0312 13:23:58.630925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:23:59 crc kubenswrapper[4125]: I0312 13:23:59.379690 4125 csi_plugin.go:880] Failed to contact
API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:23:59Z is after 2025-06-26T12:47:18Z Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.025425 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.027757 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.028119 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.028158 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.278791 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.279521 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.282092 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.282394 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.282655 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:00 crc kubenswrapper[4125]: I0312 13:24:00.382056 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:00Z is after 2025-06-26T12:47:18Z Mar 12 13:24:01 crc kubenswrapper[4125]: I0312 13:24:01.382554 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:01Z is after 2025-06-26T12:47:18Z Mar 12 13:24:02 crc kubenswrapper[4125]: E0312 13:24:02.191674 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:24:02 crc kubenswrapper[4125]: I0312 13:24:02.381365 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:02Z is after 2025-06-26T12:47:18Z Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.279214 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded 
while awaiting headers)" start-of-body= Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.279335 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.381168 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:03Z is after 2025-06-26T12:47:18Z Mar 12 13:24:03 crc kubenswrapper[4125]: E0312 13:24:03.418149 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:03Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.660752 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.663403 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.663676 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.663716 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:03 crc kubenswrapper[4125]: I0312 13:24:03.663769 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:24:03 crc kubenswrapper[4125]: E0312 13:24:03.670663 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:03Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.185303 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.185706 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.187945 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.188265 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.188403 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.190947 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:24:04 crc kubenswrapper[4125]: 
E0312 13:24:04.191735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:24:04 crc kubenswrapper[4125]: I0312 13:24:04.383111 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:04Z is after 2025-06-26T12:47:18Z Mar 12 13:24:05 crc kubenswrapper[4125]: I0312 13:24:05.380325 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:05Z is after 2025-06-26T12:47:18Z Mar 12 13:24:05 crc kubenswrapper[4125]: E0312 13:24:05.719223 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:05Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:24:06 crc kubenswrapper[4125]: I0312 13:24:06.383638 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:06Z is after 2025-06-26T12:47:18Z Mar 12 13:24:07 crc kubenswrapper[4125]: I0312 13:24:07.382093 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:07Z is after 2025-06-26T12:47:18Z Mar 12 13:24:08 crc kubenswrapper[4125]: W0312 13:24:08.014088 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:08Z is after 2025-06-26T12:47:18Z Mar 12 13:24:08 crc kubenswrapper[4125]: E0312 13:24:08.014240 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to 
list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:08Z is after 2025-06-26T12:47:18Z Mar 12 13:24:08 crc kubenswrapper[4125]: I0312 13:24:08.383418 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:08Z is after 2025-06-26T12:47:18Z Mar 12 13:24:09 crc kubenswrapper[4125]: I0312 13:24:09.382035 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:09Z is after 2025-06-26T12:47:18Z Mar 12 13:24:10 crc kubenswrapper[4125]: I0312 13:24:10.383385 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:10Z is after 2025-06-26T12:47:18Z Mar 12 13:24:10 crc kubenswrapper[4125]: E0312 13:24:10.425345 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:10Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:24:10 crc kubenswrapper[4125]: I0312 13:24:10.671107 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:10 crc kubenswrapper[4125]: I0312 13:24:10.674444 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:10 crc kubenswrapper[4125]: I0312 13:24:10.674565 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:10 crc kubenswrapper[4125]: I0312 13:24:10.674598 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:10 crc kubenswrapper[4125]: I0312 13:24:10.674657 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:24:10 crc kubenswrapper[4125]: E0312 13:24:10.683438 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:10Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:24:11 crc kubenswrapper[4125]: I0312 13:24:11.381068 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:11Z is after 2025-06-26T12:47:18Z Mar 12 13:24:12 crc kubenswrapper[4125]: E0312 13:24:12.192609 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get 
node info: node \"crc\" not found" Mar 12 13:24:12 crc kubenswrapper[4125]: I0312 13:24:12.383114 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:12Z is after 2025-06-26T12:47:18Z Mar 12 13:24:13 crc kubenswrapper[4125]: I0312 13:24:13.279005 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:24:13 crc kubenswrapper[4125]: I0312 13:24:13.279276 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:24:13 crc kubenswrapper[4125]: I0312 13:24:13.382075 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:13Z is after 2025-06-26T12:47:18Z Mar 12 13:24:14 crc kubenswrapper[4125]: I0312 13:24:14.382389 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:14Z is after 2025-06-26T12:47:18Z Mar 12 13:24:15 crc kubenswrapper[4125]: I0312 13:24:15.383146 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:15Z is after 2025-06-26T12:47:18Z Mar 12 13:24:15 crc kubenswrapper[4125]: E0312 13:24:15.729356 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:15Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:24:15 crc kubenswrapper[4125]: E0312 13:24:15.729499 4125 event.go:294] "Unable to write event (retry limit exceeded!)" 
event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:24:15 crc kubenswrapper[4125]: E0312 13:24:15.737087 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:15Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:24:16 crc kubenswrapper[4125]: I0312 13:24:16.025440 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:16 crc kubenswrapper[4125]: I0312 13:24:16.027630 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:16 crc kubenswrapper[4125]: I0312 13:24:16.027938 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:16 crc kubenswrapper[4125]: I0312 13:24:16.028144 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:16 crc kubenswrapper[4125]: I0312 13:24:16.031415 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:24:16 crc kubenswrapper[4125]: E0312 13:24:16.032669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:24:16 crc kubenswrapper[4125]: W0312 13:24:16.145329 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:16Z is after 2025-06-26T12:47:18Z Mar 12 13:24:16 crc kubenswrapper[4125]: E0312 13:24:16.145472 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch 
*v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:16Z is after 2025-06-26T12:47:18Z Mar 12 13:24:16 crc kubenswrapper[4125]: I0312 13:24:16.384699 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:16Z is after 2025-06-26T12:47:18Z Mar 12 13:24:17 crc kubenswrapper[4125]: I0312 13:24:17.382444 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:17Z is after 2025-06-26T12:47:18Z Mar 12 13:24:17 crc kubenswrapper[4125]: E0312 13:24:17.434579 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:17Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:24:17 crc kubenswrapper[4125]: I0312 13:24:17.684180 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:17 crc kubenswrapper[4125]: I0312 13:24:17.687253 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:17 crc kubenswrapper[4125]: I0312 13:24:17.687535 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:17 crc kubenswrapper[4125]: I0312 13:24:17.687573 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:17 crc kubenswrapper[4125]: I0312 13:24:17.687629 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:24:17 crc kubenswrapper[4125]: E0312 13:24:17.696139 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:17Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:24:18 crc kubenswrapper[4125]: I0312 13:24:18.382462 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:18Z is after 2025-06-26T12:47:18Z Mar 12 13:24:18 crc kubenswrapper[4125]: W0312 13:24:18.721447 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:18Z is after 2025-06-26T12:47:18Z Mar 12 13:24:18 crc kubenswrapper[4125]: E0312 13:24:18.721589 4125 reflector.go:147] 
k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:18Z is after 2025-06-26T12:47:18Z Mar 12 13:24:19 crc kubenswrapper[4125]: I0312 13:24:19.382910 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:19Z is after 2025-06-26T12:47:18Z Mar 12 13:24:20 crc kubenswrapper[4125]: I0312 13:24:20.382138 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:20Z is after 2025-06-26T12:47:18Z Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.382437 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:21Z is after 2025-06-26T12:47:18Z Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.821543 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:47908->192.168.126.11:10357: read: connection reset by peer" start-of-body= Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.821669 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:47908->192.168.126.11:10357: read: connection reset by peer" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.821724 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.822468 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.826144 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.826193 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.826209 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.828685 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 12 13:24:21 crc kubenswrapper[4125]: I0312 13:24:21.834682 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0" gracePeriod=30 Mar 12 13:24:21 crc kubenswrapper[4125]: E0312 13:24:21.862456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:24:22 crc kubenswrapper[4125]: E0312 13:24:22.193621 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.382158 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:22Z is after 2025-06-26T12:47:18Z Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.728128 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/5.log" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.729763 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/4.log" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.732286 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0" exitCode=255 Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.732446 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"} Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.732525 4125 scope.go:117] "RemoveContainer" containerID="0c8b1958819a20f3ea4f703f69c8c052b00ea16e5ce399751bb1413a8e814bb3" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.732717 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.734662 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.734729 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:22 crc kubenswrapper[4125]: I0312 13:24:22.734746 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:22 crc kubenswrapper[4125]: 
I0312 13:24:22.736267 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0" Mar 12 13:24:22 crc kubenswrapper[4125]: E0312 13:24:22.736985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:24:23 crc kubenswrapper[4125]: I0312 13:24:23.378398 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:23Z is after 2025-06-26T12:47:18Z Mar 12 13:24:23 crc kubenswrapper[4125]: E0312 13:24:23.450995 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:23Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:24:23 crc kubenswrapper[4125]: I0312 13:24:23.532230 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:24:23 crc kubenswrapper[4125]: E0312 13:24:23.540138 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:23Z is after 2025-06-26T12:47:18Z Mar 12 13:24:23 crc kubenswrapper[4125]: I0312 13:24:23.741755 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/5.log" Mar 12 13:24:24 crc kubenswrapper[4125]: I0312 13:24:24.381495 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:24Z is after 2025-06-26T12:47:18Z Mar 12 13:24:24 crc kubenswrapper[4125]: E0312 13:24:24.447607 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:24Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:24:24 crc kubenswrapper[4125]: I0312 13:24:24.697506 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:24 crc kubenswrapper[4125]: I0312 13:24:24.700638 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:24 crc kubenswrapper[4125]: I0312 13:24:24.700691 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:24 crc kubenswrapper[4125]: I0312 13:24:24.700719 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:24 crc kubenswrapper[4125]: I0312 13:24:24.700909 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:24:24 crc kubenswrapper[4125]: E0312 13:24:24.705031 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:24Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:24:25 crc kubenswrapper[4125]: I0312 13:24:25.381359 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:25Z is after 2025-06-26T12:47:18Z Mar 12 13:24:26 crc kubenswrapper[4125]: I0312 13:24:26.383214 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:26Z is after 2025-06-26T12:47:18Z Mar 12 13:24:27 crc kubenswrapper[4125]: I0312 13:24:27.380527 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:27Z is after 2025-06-26T12:47:18Z Mar 12 13:24:28 crc kubenswrapper[4125]: I0312 13:24:28.384292 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:28Z is after 2025-06-26T12:47:18Z Mar 12 13:24:29 crc kubenswrapper[4125]: I0312 13:24:29.381609 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:29Z is after 2025-06-26T12:47:18Z Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.025367 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:30 crc 
kubenswrapper[4125]: I0312 13:24:30.027491 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.027963 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.028216 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.031064 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:24:30 crc kubenswrapper[4125]: E0312 13:24:30.032133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.382924 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:30Z is after 2025-06-26T12:47:18Z Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.473073 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.473381 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.476090 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.477001 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.477317 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:30 crc kubenswrapper[4125]: I0312 13:24:30.481319 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0" Mar 12 13:24:30 crc kubenswrapper[4125]: E0312 13:24:30.483093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.384445 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:31Z is after 2025-06-26T12:47:18Z Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.411242 4125 
kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.411402 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.411450 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.411623 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.411676 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:24:31 crc kubenswrapper[4125]: E0312 13:24:31.456252 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:31Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.705924 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.708175 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.708246 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.708272 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:24:31 crc kubenswrapper[4125]: I0312 13:24:31.708326 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:24:31 crc kubenswrapper[4125]: E0312 13:24:31.716793 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:31Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:24:32 crc kubenswrapper[4125]: E0312 13:24:32.194084 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:24:32 crc kubenswrapper[4125]: I0312 13:24:32.380070 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:32Z is after 2025-06-26T12:47:18Z Mar 12 13:24:33 crc kubenswrapper[4125]: I0312 13:24:33.380786 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:33Z is after 2025-06-26T12:47:18Z Mar 12 13:24:33 crc kubenswrapper[4125]: E0312 13:24:33.459806 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:33Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:24:34 crc kubenswrapper[4125]: I0312 13:24:34.382768 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:34Z is after 2025-06-26T12:47:18Z Mar 12 13:24:35 crc kubenswrapper[4125]: I0312 13:24:35.381631 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:35Z is after 2025-06-26T12:47:18Z Mar 12 13:24:36 crc kubenswrapper[4125]: I0312 13:24:36.381209 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:36Z is after 2025-06-26T12:47:18Z Mar 12 13:24:37 crc kubenswrapper[4125]: I0312 13:24:37.384114 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:37Z is after 2025-06-26T12:47:18Z Mar 12 13:24:37 crc kubenswrapper[4125]: W0312 13:24:37.446237 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:37Z is after 2025-06-26T12:47:18Z Mar 12 13:24:37 crc kubenswrapper[4125]: E0312 13:24:37.446413 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:37Z is after 2025-06-26T12:47:18Z Mar 12 13:24:38 crc kubenswrapper[4125]: I0312 13:24:38.382444 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2026-03-12T13:24:38Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:38 crc kubenswrapper[4125]: E0312 13:24:38.464246 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:38Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:24:38 crc kubenswrapper[4125]: I0312 13:24:38.717269 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:38 crc kubenswrapper[4125]: I0312 13:24:38.719570 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:38 crc kubenswrapper[4125]: I0312 13:24:38.719691 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:38 crc kubenswrapper[4125]: I0312 13:24:38.719797 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:38 crc kubenswrapper[4125]: I0312 13:24:38.719985 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:24:38 crc kubenswrapper[4125]: E0312 13:24:38.728358 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:38Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:24:39 crc kubenswrapper[4125]: I0312 13:24:39.383560 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:39Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:40 crc kubenswrapper[4125]: I0312 13:24:40.383392 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:40Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.025370 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.025604 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.025936 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.027516 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.027626 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.027659 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.028599 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.028657 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.028682 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.029591 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.029806 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.029976 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.031327 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:24:41 crc kubenswrapper[4125]: E0312 13:24:41.032164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.033168 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"
Mar 12 13:24:41 crc kubenswrapper[4125]: E0312 13:24:41.034928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:24:41 crc kubenswrapper[4125]: I0312 13:24:41.383644 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:41Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:42 crc kubenswrapper[4125]: E0312 13:24:42.195193 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:24:42 crc kubenswrapper[4125]: I0312 13:24:42.384128 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:42Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:43 crc kubenswrapper[4125]: I0312 13:24:43.383138 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:43Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:43 crc kubenswrapper[4125]: E0312 13:24:43.467306 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:43Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:24:44 crc kubenswrapper[4125]: I0312 13:24:44.382200 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:44Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:45 crc kubenswrapper[4125]: I0312 13:24:45.383224 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:45Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:45 crc kubenswrapper[4125]: E0312 13:24:45.472055 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:45Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:24:45 crc kubenswrapper[4125]: I0312 13:24:45.729240 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:45 crc kubenswrapper[4125]: I0312 13:24:45.732188 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:45 crc kubenswrapper[4125]: I0312 13:24:45.732331 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:45 crc kubenswrapper[4125]: I0312 13:24:45.732370 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:45 crc kubenswrapper[4125]: I0312 13:24:45.732428 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:24:45 crc kubenswrapper[4125]: E0312 13:24:45.740639 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:45Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:24:46 crc kubenswrapper[4125]: I0312 13:24:46.025954 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:46 crc kubenswrapper[4125]: I0312 13:24:46.028080 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:46 crc kubenswrapper[4125]: I0312 13:24:46.028219 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:46 crc kubenswrapper[4125]: I0312 13:24:46.028343 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:46 crc kubenswrapper[4125]: I0312 13:24:46.391179 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:46Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:47 crc kubenswrapper[4125]: I0312 13:24:47.378754 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:47Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:47 crc kubenswrapper[4125]: W0312 13:24:47.682777 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:47Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:47 crc kubenswrapper[4125]: E0312 13:24:47.683010 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:47Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:48 crc kubenswrapper[4125]: I0312 13:24:48.383368 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:48Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:49 crc kubenswrapper[4125]: I0312 13:24:49.379779 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:49Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:50 crc kubenswrapper[4125]: I0312 13:24:50.383621 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:50Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:51 crc kubenswrapper[4125]: I0312 13:24:51.382153 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:51Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:52 crc kubenswrapper[4125]: E0312 13:24:52.195438 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:24:52 crc kubenswrapper[4125]: I0312 13:24:52.384643 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:52Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:52 crc kubenswrapper[4125]: E0312 13:24:52.479589 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:52Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:24:52 crc kubenswrapper[4125]: I0312 13:24:52.742047 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:52 crc kubenswrapper[4125]: I0312 13:24:52.744976 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:52 crc kubenswrapper[4125]: I0312 13:24:52.745443 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:52 crc kubenswrapper[4125]: I0312 13:24:52.745765 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:52 crc kubenswrapper[4125]: I0312 13:24:52.746215 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:24:52 crc kubenswrapper[4125]: E0312 13:24:52.753559 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:52Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:24:53 crc kubenswrapper[4125]: I0312 13:24:53.381320 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:53Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:53 crc kubenswrapper[4125]: E0312 13:24:53.473076 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:53Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:24:54 crc kubenswrapper[4125]: I0312 13:24:54.025581 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:54 crc kubenswrapper[4125]: I0312 13:24:54.028594 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:54 crc kubenswrapper[4125]: I0312 13:24:54.029018 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:54 crc kubenswrapper[4125]: I0312 13:24:54.029125 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:54 crc kubenswrapper[4125]: I0312 13:24:54.033151 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"
Mar 12 13:24:54 crc kubenswrapper[4125]: E0312 13:24:54.035111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:24:54 crc kubenswrapper[4125]: I0312 13:24:54.379519 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:54Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.025454 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.028420 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.028534 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.028566 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.037476 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:24:55 crc kubenswrapper[4125]: E0312 13:24:55.038700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.380626 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:55 crc kubenswrapper[4125]: I0312 13:24:55.532236 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Mar 12 13:24:55 crc kubenswrapper[4125]: E0312 13:24:55.538914 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:55Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:56 crc kubenswrapper[4125]: I0312 13:24:56.382991 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:56Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:57 crc kubenswrapper[4125]: I0312 13:24:57.382999 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:57Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:58 crc kubenswrapper[4125]: I0312 13:24:58.378985 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:58Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:59 crc kubenswrapper[4125]: I0312 13:24:59.386013 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:59Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:59 crc kubenswrapper[4125]: E0312 13:24:59.485793 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:59Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:24:59 crc kubenswrapper[4125]: W0312 13:24:59.721646 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:59Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:59 crc kubenswrapper[4125]: E0312 13:24:59.721918 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:59Z is after 2025-06-26T12:47:18Z
Mar 12 13:24:59 crc kubenswrapper[4125]: I0312 13:24:59.754345 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:24:59 crc kubenswrapper[4125]: I0312 13:24:59.756159 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:24:59 crc kubenswrapper[4125]: I0312 13:24:59.756383 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:24:59 crc kubenswrapper[4125]: I0312 13:24:59.756416 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:24:59 crc kubenswrapper[4125]: I0312 13:24:59.756459 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:24:59 crc kubenswrapper[4125]: E0312 13:24:59.761942 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:24:59Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:25:00 crc kubenswrapper[4125]: I0312 13:25:00.383396 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:00Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:01 crc kubenswrapper[4125]: I0312 13:25:01.382240 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:01Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:02 crc kubenswrapper[4125]: E0312 13:25:02.197777 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:25:02 crc kubenswrapper[4125]: I0312 13:25:02.380979 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:02Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:03 crc kubenswrapper[4125]: I0312 13:25:03.382238 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:03Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:03 crc kubenswrapper[4125]: E0312 13:25:03.482280 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:03Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:25:04 crc kubenswrapper[4125]: I0312 13:25:04.381949 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:04Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:05 crc kubenswrapper[4125]: I0312 13:25:05.382679 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:05Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:06 crc kubenswrapper[4125]: I0312 13:25:06.380699 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:06Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:06 crc kubenswrapper[4125]: E0312 13:25:06.495080 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:06Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:25:06 crc kubenswrapper[4125]: I0312 13:25:06.762122 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:06 crc kubenswrapper[4125]: I0312 13:25:06.765125 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:06 crc kubenswrapper[4125]: I0312 13:25:06.765252 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:06 crc kubenswrapper[4125]: I0312 13:25:06.765293 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:06 crc kubenswrapper[4125]: I0312 13:25:06.765339 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:25:06 crc kubenswrapper[4125]: E0312 13:25:06.774261 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:06Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:25:07 crc kubenswrapper[4125]: I0312 13:25:07.025449 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:07 crc kubenswrapper[4125]: I0312 13:25:07.027570 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:07 crc kubenswrapper[4125]: I0312 13:25:07.027685 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:07 crc kubenswrapper[4125]: I0312 13:25:07.027914 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:07 crc kubenswrapper[4125]: I0312 13:25:07.032769 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"
Mar 12 13:25:07 crc kubenswrapper[4125]: E0312 13:25:07.034435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:25:07 crc kubenswrapper[4125]: I0312 13:25:07.382449 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:07Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:08 crc kubenswrapper[4125]: I0312 13:25:08.025082 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:08 crc kubenswrapper[4125]: I0312 13:25:08.027149 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:08 crc kubenswrapper[4125]: I0312 13:25:08.027309 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:08 crc kubenswrapper[4125]: I0312 13:25:08.027597 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:08 crc kubenswrapper[4125]: I0312 13:25:08.030343 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:25:08 crc kubenswrapper[4125]: E0312 13:25:08.031157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:25:08 crc kubenswrapper[4125]: I0312 13:25:08.386495 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:08Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:09 crc kubenswrapper[4125]: I0312 13:25:09.379776 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:09Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:10 crc kubenswrapper[4125]: I0312 13:25:10.380277 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:10Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:11 crc kubenswrapper[4125]: I0312 13:25:11.380802 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:11Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:12 crc kubenswrapper[4125]: E0312 13:25:12.199055 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:25:12 crc kubenswrapper[4125]: I0312 13:25:12.382439 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:12Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:13 crc kubenswrapper[4125]: I0312 13:25:13.385130 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:13Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:13 crc kubenswrapper[4125]: E0312 13:25:13.493960 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:13Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:25:13 crc kubenswrapper[4125]: E0312 13:25:13.504458 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:13Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:25:13 crc kubenswrapper[4125]: I0312 13:25:13.775034 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:13 crc kubenswrapper[4125]: I0312 13:25:13.778017 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:13 crc kubenswrapper[4125]: I0312 13:25:13.778163 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:13 crc kubenswrapper[4125]: I0312 13:25:13.778199 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:13 crc kubenswrapper[4125]: I0312 13:25:13.778252 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:25:13 crc kubenswrapper[4125]: E0312 13:25:13.785656 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:13Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:25:14 crc kubenswrapper[4125]: I0312 13:25:14.382108 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:14Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:15 crc kubenswrapper[4125]: I0312 13:25:15.024659 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:15 crc kubenswrapper[4125]: I0312 13:25:15.027188 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:15 crc kubenswrapper[4125]: I0312 13:25:15.027263 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:15 crc kubenswrapper[4125]: I0312 13:25:15.027299 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:15 crc kubenswrapper[4125]: W0312 13:25:15.320632 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:15Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:15 crc kubenswrapper[4125]: E0312 13:25:15.320933 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:15Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:15 crc kubenswrapper[4125]: I0312 13:25:15.382400 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:15Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:16 crc kubenswrapper[4125]: I0312 13:25:16.388946 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:16Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:17 crc kubenswrapper[4125]: I0312 13:25:17.383424 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:17Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:18 crc kubenswrapper[4125]: I0312 13:25:18.381175 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:18Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:19 crc kubenswrapper[4125]: I0312 13:25:19.025302 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:19 crc kubenswrapper[4125]: I0312 13:25:19.028088 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:19 crc kubenswrapper[4125]: I0312 13:25:19.028266 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:19 crc kubenswrapper[4125]: I0312 13:25:19.028406 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:19 crc kubenswrapper[4125]: I0312 13:25:19.033957 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:25:19 crc kubenswrapper[4125]: E0312 13:25:19.035238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:25:19 crc kubenswrapper[4125]: I0312 13:25:19.381061 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:19Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:20 crc kubenswrapper[4125]: I0312 13:25:20.382970 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:20Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:20 crc kubenswrapper[4125]: E0312 13:25:20.512251 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:20Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:25:20 crc kubenswrapper[4125]: I0312 13:25:20.786220 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:20 crc kubenswrapper[4125]: I0312 13:25:20.787921 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:20 crc kubenswrapper[4125]: I0312 13:25:20.788026 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:20 crc kubenswrapper[4125]: I0312 13:25:20.788060 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:20 crc kubenswrapper[4125]: I0312 13:25:20.788111 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:25:20 crc kubenswrapper[4125]: E0312 13:25:20.792140 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:20Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:25:21 crc kubenswrapper[4125]: I0312 13:25:21.025474 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:21 crc kubenswrapper[4125]: I0312 13:25:21.027673 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:21 crc kubenswrapper[4125]: I0312 13:25:21.027901 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:21 crc kubenswrapper[4125]: I0312 13:25:21.027926 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:21 crc kubenswrapper[4125]: I0312 13:25:21.030026 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"
Mar 12 13:25:21 crc kubenswrapper[4125]: E0312 13:25:21.031056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:25:21 crc kubenswrapper[4125]: W0312 13:25:21.320505 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:21Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:21 crc kubenswrapper[4125]: E0312 13:25:21.320638 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:21Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:21 crc kubenswrapper[4125]: I0312 13:25:21.382374 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:21Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:22 crc kubenswrapper[4125]: E0312 13:25:22.199495 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:25:22 crc kubenswrapper[4125]: I0312 13:25:22.381181 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:22Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:23 crc kubenswrapper[4125]: I0312 13:25:23.384325 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:23Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:23 crc kubenswrapper[4125]: E0312 13:25:23.500299 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:23Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:25:24 crc kubenswrapper[4125]: I0312 13:25:24.382987 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:24Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:25 crc kubenswrapper[4125]: I0312 13:25:25.381341 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:25Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:26 crc kubenswrapper[4125]: I0312 13:25:26.384534 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:26Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.381705 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:27Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:27 crc kubenswrapper[4125]: E0312 13:25:27.520988 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:27Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.531464 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Mar 12 13:25:27 crc kubenswrapper[4125]: E0312 13:25:27.540985 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:27Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.793095 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.795577 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.795702 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.795794 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:27 crc kubenswrapper[4125]: I0312 13:25:27.796156 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:25:27 crc kubenswrapper[4125]: E0312 13:25:27.804259 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:27Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:25:28 crc kubenswrapper[4125]: I0312 13:25:28.382427 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:28Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:29 crc kubenswrapper[4125]: I0312 13:25:29.381891 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:29Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:30 crc kubenswrapper[4125]: I0312 13:25:30.382476 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:30Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:30 crc kubenswrapper[4125]: E0312 13:25:30.896279 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:30 crc kubenswrapper[4125]: E0312 13:25:30.914083 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.026589 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.036125 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.036195 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.036223 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.040090 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:25:31 crc kubenswrapper[4125]: E0312 13:25:31.041157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.380438 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:31Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.412333 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.412955 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.413172 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.413363 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:25:31 crc kubenswrapper[4125]: I0312 13:25:31.413579 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:25:31 crc kubenswrapper[4125]: E0312 13:25:31.895581 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:32 crc kubenswrapper[4125]: E0312 13:25:32.200210 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:25:32 crc kubenswrapper[4125]: I0312 13:25:32.383106 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:32Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:32 crc kubenswrapper[4125]: E0312 13:25:32.894999 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:33 crc kubenswrapper[4125]: I0312 13:25:33.384139 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:33Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:33 crc kubenswrapper[4125]: E0312 13:25:33.510078 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:33Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:25:33 crc kubenswrapper[4125]: E0312 13:25:33.896075 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:34 crc kubenswrapper[4125]: I0312 13:25:34.383548 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:34Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:34 crc kubenswrapper[4125]: E0312 13:25:34.531300 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:34Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:25:34 crc kubenswrapper[4125]: I0312 13:25:34.804540 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:34 crc kubenswrapper[4125]: I0312 13:25:34.808612 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:34 crc kubenswrapper[4125]: I0312 13:25:34.808902 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:34 crc kubenswrapper[4125]: I0312 13:25:34.808944 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:34 crc kubenswrapper[4125]: I0312 13:25:34.809012 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:25:34 crc kubenswrapper[4125]: E0312 13:25:34.818721 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:34Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:25:34 crc kubenswrapper[4125]: E0312 13:25:34.895666 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:35 crc kubenswrapper[4125]: I0312 13:25:35.026010 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:25:35 crc kubenswrapper[4125]: I0312 13:25:35.030371 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:25:35 crc kubenswrapper[4125]: I0312 13:25:35.030928 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:25:35 crc kubenswrapper[4125]: I0312 13:25:35.031079 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:25:35 crc kubenswrapper[4125]: I0312 13:25:35.034317 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0"
Mar 12 13:25:35 crc kubenswrapper[4125]: E0312 13:25:35.036030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:25:35 crc kubenswrapper[4125]: I0312 13:25:35.380221 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:35Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:35 crc kubenswrapper[4125]: E0312 13:25:35.895471 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:36 crc kubenswrapper[4125]: I0312 13:25:36.383058 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:36Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:36 crc kubenswrapper[4125]: E0312 13:25:36.895532 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:25:37 crc kubenswrapper[4125]: I0312 13:25:37.380685 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:37Z is after 2025-06-26T12:47:18Z
Mar 12 13:25:37 crc kubenswrapper[4125]: E0312 13:25:37.895645 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:39 crc kubenswrapper[4125]: E0312 13:25:39.215123 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:39 crc kubenswrapper[4125]: I0312 13:25:39.220334 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:39Z is after 2025-06-26T12:47:18Z Mar 12 13:25:39 crc kubenswrapper[4125]: I0312 13:25:39.380910 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:39Z is after 2025-06-26T12:47:18Z Mar 12 13:25:39 crc kubenswrapper[4125]: E0312 13:25:39.895594 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:40 crc kubenswrapper[4125]: I0312 13:25:40.382634 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:40Z is after 2025-06-26T12:47:18Z Mar 12 13:25:40 crc kubenswrapper[4125]: W0312 13:25:40.429948 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:40Z is after 2025-06-26T12:47:18Z Mar 12 13:25:40 crc kubenswrapper[4125]: E0312 13:25:40.430057 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:40Z is after 2025-06-26T12:47:18Z Mar 12 13:25:40 crc kubenswrapper[4125]: E0312 13:25:40.896066 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:40 crc kubenswrapper[4125]: E0312 13:25:40.915220 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:41 crc kubenswrapper[4125]: I0312 13:25:41.379568 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:41Z is after 2025-06-26T12:47:18Z Mar 12 13:25:41 crc kubenswrapper[4125]: E0312 13:25:41.539505 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:41Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:25:41 crc kubenswrapper[4125]: I0312 13:25:41.819685 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:41 crc kubenswrapper[4125]: I0312 13:25:41.821906 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:41 crc kubenswrapper[4125]: I0312 13:25:41.822453 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:41 crc kubenswrapper[4125]: I0312 13:25:41.823217 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:41 crc kubenswrapper[4125]: I0312 13:25:41.823664 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:25:41 crc kubenswrapper[4125]: E0312 13:25:41.829926 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:41Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:25:41 crc kubenswrapper[4125]: E0312 13:25:41.895501 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:42 crc kubenswrapper[4125]: E0312 13:25:42.200973 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:25:42 crc kubenswrapper[4125]: I0312 13:25:42.382599 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:42Z is after 2025-06-26T12:47:18Z Mar 12 13:25:42 crc kubenswrapper[4125]: E0312 13:25:42.895722 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:42 crc kubenswrapper[4125]: W0312 13:25:42.970026 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:42Z is after 2025-06-26T12:47:18Z Mar 12 13:25:42 crc kubenswrapper[4125]: E0312 13:25:42.970208 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:42Z is after 2025-06-26T12:47:18Z Mar 12 13:25:43 crc kubenswrapper[4125]: I0312 13:25:43.383414 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:43Z is after 2025-06-26T12:47:18Z Mar 12 13:25:43 crc kubenswrapper[4125]: E0312 13:25:43.517573 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:43Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:25:43 crc kubenswrapper[4125]: E0312 13:25:43.895096 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:44 crc kubenswrapper[4125]: I0312 13:25:44.382955 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:44Z is after 2025-06-26T12:47:18Z Mar 12 13:25:44 crc kubenswrapper[4125]: E0312 13:25:44.895924 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:45 crc kubenswrapper[4125]: I0312 13:25:45.026277 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:45 crc kubenswrapper[4125]: I0312 13:25:45.029197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:45 crc kubenswrapper[4125]: I0312 13:25:45.029312 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:45 crc kubenswrapper[4125]: I0312 13:25:45.029344 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:45 crc kubenswrapper[4125]: I0312 13:25:45.032160 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:25:45 crc kubenswrapper[4125]: E0312 13:25:45.033169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:25:45 crc kubenswrapper[4125]: I0312 13:25:45.382706 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:45Z is after 2025-06-26T12:47:18Z Mar 12 13:25:45 crc kubenswrapper[4125]: E0312 13:25:45.895034 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:46 crc kubenswrapper[4125]: I0312 13:25:46.381611 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:46Z is after 2025-06-26T12:47:18Z Mar 12 13:25:46 crc kubenswrapper[4125]: E0312 13:25:46.895209 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.405602 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.407320 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:49 crc kubenswrapper[4125]: E0312 13:25:49.413115 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414158 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414205 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414220 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414251 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414545 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414585 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.414598 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.418908 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0" Mar 12 13:25:49 crc kubenswrapper[4125]: I0312 13:25:49.421619 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:49Z is after 2025-06-26T12:47:18Z Mar 12 13:25:49 crc kubenswrapper[4125]: E0312 13:25:49.422884 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:49Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:25:49 crc kubenswrapper[4125]: E0312 13:25:49.423220 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:49Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:25:49 crc kubenswrapper[4125]: E0312 13:25:49.895908 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.381379 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:50Z is after 2025-06-26T12:47:18Z Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.427078 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/5.log" Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.429272 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.429420 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.430915 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.431054 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:50 crc kubenswrapper[4125]: I0312 13:25:50.431092 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:50 crc kubenswrapper[4125]: E0312 13:25:50.895729 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:50 crc kubenswrapper[4125]: E0312 13:25:50.916165 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:51 crc kubenswrapper[4125]: I0312 13:25:51.379208 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:51Z is after 2025-06-26T12:47:18Z Mar 12 13:25:51 crc kubenswrapper[4125]: E0312 13:25:51.895983 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:52 crc kubenswrapper[4125]: I0312 13:25:52.025336 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:52 crc kubenswrapper[4125]: I0312 13:25:52.027172 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:52 crc kubenswrapper[4125]: I0312 13:25:52.027256 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:52 crc kubenswrapper[4125]: I0312 13:25:52.027299 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:52 crc kubenswrapper[4125]: E0312 13:25:52.201145 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:25:52 crc kubenswrapper[4125]: I0312 13:25:52.382510 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:52Z is after 2025-06-26T12:47:18Z Mar 12 13:25:52 crc kubenswrapper[4125]: E0312 13:25:52.895028 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:53 crc kubenswrapper[4125]: I0312 13:25:53.383633 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:53Z is after 2025-06-26T12:47:18Z Mar 12 13:25:53 crc kubenswrapper[4125]: E0312 13:25:53.523650 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:53Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:25:53 crc kubenswrapper[4125]: E0312 13:25:53.895052 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:54 crc kubenswrapper[4125]: I0312 13:25:54.382107 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:54Z is after 2025-06-26T12:47:18Z Mar 12 13:25:54 crc kubenswrapper[4125]: E0312 13:25:54.895407 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:55 crc kubenswrapper[4125]: I0312 13:25:55.025501 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:55 crc kubenswrapper[4125]: I0312 13:25:55.027240 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:55 crc kubenswrapper[4125]: I0312 13:25:55.027347 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:55 crc kubenswrapper[4125]: I0312 13:25:55.027377 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:55 crc kubenswrapper[4125]: I0312 13:25:55.379976 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:55Z is after 2025-06-26T12:47:18Z Mar 12 13:25:55 crc kubenswrapper[4125]: E0312 13:25:55.895976 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:56 crc kubenswrapper[4125]: I0312 13:25:56.383475 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:56Z is after 2025-06-26T12:47:18Z Mar 12 13:25:56 crc kubenswrapper[4125]: I0312 13:25:56.423246 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:56 crc kubenswrapper[4125]: I0312 13:25:56.426290 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:56 crc kubenswrapper[4125]: I0312 13:25:56.426609 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:56 crc kubenswrapper[4125]: I0312 13:25:56.426990 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:56 crc kubenswrapper[4125]: I0312 13:25:56.427233 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:25:56 crc kubenswrapper[4125]: E0312 13:25:56.431230 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:56Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:25:56 crc kubenswrapper[4125]: E0312 13:25:56.435093 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:56Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:25:56 crc kubenswrapper[4125]: E0312 13:25:56.895650 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:57 crc kubenswrapper[4125]: I0312 13:25:57.383607 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:57Z is after 2025-06-26T12:47:18Z Mar 12 13:25:57 crc kubenswrapper[4125]: I0312 13:25:57.520489 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:25:57 crc kubenswrapper[4125]: I0312 13:25:57.520943 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:57 crc kubenswrapper[4125]: I0312 13:25:57.523496 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:57 crc kubenswrapper[4125]: I0312 13:25:57.523911 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:57 crc kubenswrapper[4125]: I0312 13:25:57.523955 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:57 crc kubenswrapper[4125]: E0312 13:25:57.896174 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:58 crc kubenswrapper[4125]: I0312 13:25:58.384441 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:58Z is after 2025-06-26T12:47:18Z Mar 12 13:25:58 crc kubenswrapper[4125]: E0312 13:25:58.895623 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.026063 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.028895 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.028993 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.029028 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.031334 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:25:59 crc kubenswrapper[4125]: E0312 13:25:59.032095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.382073 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:59Z is after 2025-06-26T12:47:18Z Mar 12 13:25:59 crc kubenswrapper[4125]: I0312 13:25:59.531696 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:25:59 crc kubenswrapper[4125]: E0312 13:25:59.540702 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:25:59Z is after 2025-06-26T12:47:18Z Mar 12 13:25:59 crc kubenswrapper[4125]: E0312 13:25:59.896013 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:00 crc kubenswrapper[4125]: I0312 13:26:00.279906 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:26:00 crc kubenswrapper[4125]: I0312 13:26:00.280264 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:00 crc kubenswrapper[4125]: I0312 13:26:00.282578 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:00 crc kubenswrapper[4125]: I0312 13:26:00.282674 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:00 crc kubenswrapper[4125]: I0312 13:26:00.282697 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:00 crc kubenswrapper[4125]: I0312 13:26:00.383250 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:00Z is after 2025-06-26T12:47:18Z Mar 12 13:26:00 crc kubenswrapper[4125]: E0312 13:26:00.895495 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:00 crc kubenswrapper[4125]: E0312 13:26:00.917183 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:01 crc kubenswrapper[4125]: I0312 13:26:01.382486 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:01Z is after 2025-06-26T12:47:18Z Mar 12 13:26:01 crc kubenswrapper[4125]: E0312 13:26:01.895986 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:02 crc kubenswrapper[4125]: E0312 13:26:02.202096 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:26:02 crc kubenswrapper[4125]: I0312 13:26:02.382993 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:02Z is after 2025-06-26T12:47:18Z Mar 12 13:26:02 crc kubenswrapper[4125]: W0312 13:26:02.849595 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:02Z is after 2025-06-26T12:47:18Z Mar 12 13:26:02 crc kubenswrapper[4125]: E0312 13:26:02.849678 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:02Z is after 2025-06-26T12:47:18Z Mar 12 13:26:02 crc kubenswrapper[4125]: E0312 13:26:02.895905 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.281067 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.281278 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.382509 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:03Z is after 2025-06-26T12:47:18Z Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.436345 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.438608 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.438697 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.438997 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:03 crc kubenswrapper[4125]: I0312 13:26:03.439057 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:26:03 crc kubenswrapper[4125]: E0312 13:26:03.441592 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:03Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:26:03 crc kubenswrapper[4125]: E0312 13:26:03.446960 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:03Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:26:03 crc kubenswrapper[4125]: E0312 13:26:03.533688 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:03Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:03 crc kubenswrapper[4125]: E0312 13:26:03.534093 4125 event.go:294] "Unable to write event (retry limit exceeded!)" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:03 crc kubenswrapper[4125]: E0312 13:26:03.543325 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:03Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:03 crc kubenswrapper[4125]: E0312 13:26:03.895671 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:04 crc kubenswrapper[4125]: I0312 13:26:04.382943 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:04Z is after 2025-06-26T12:47:18Z Mar 12 13:26:04 crc kubenswrapper[4125]: E0312 13:26:04.895021 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:05 crc kubenswrapper[4125]: W0312 13:26:05.383001 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:05Z is after 2025-06-26T12:47:18Z Mar 12 13:26:05 crc kubenswrapper[4125]: E0312 13:26:05.384133 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:05Z is after 2025-06-26T12:47:18Z Mar 12 13:26:05 crc kubenswrapper[4125]: I0312 13:26:05.385253 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:05Z is after 2025-06-26T12:47:18Z Mar 12 13:26:05 crc kubenswrapper[4125]: E0312 13:26:05.491058 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:05Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:05 crc kubenswrapper[4125]: E0312 13:26:05.895982 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:06 crc kubenswrapper[4125]: I0312 13:26:06.384315 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:06Z is after 2025-06-26T12:47:18Z Mar 12 13:26:06 crc kubenswrapper[4125]: E0312 13:26:06.895432 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:07 crc kubenswrapper[4125]: I0312 13:26:07.382544 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:07Z is after 2025-06-26T12:47:18Z Mar 12 13:26:07 crc kubenswrapper[4125]: E0312 13:26:07.896106 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:08 crc kubenswrapper[4125]: I0312 13:26:08.382444 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:08Z is after 2025-06-26T12:47:18Z Mar 12 13:26:08 crc kubenswrapper[4125]: E0312 13:26:08.895992 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:09 crc kubenswrapper[4125]: I0312 13:26:09.384713 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:09Z is after 2025-06-26T12:47:18Z Mar 12 13:26:09 crc kubenswrapper[4125]: E0312 13:26:09.895384 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:10 crc kubenswrapper[4125]: I0312 13:26:10.384048 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:10Z is after 2025-06-26T12:47:18Z Mar 12 13:26:10 crc kubenswrapper[4125]: I0312 13:26:10.447593 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:10 crc kubenswrapper[4125]: E0312 13:26:10.449896 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:10Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:26:10 crc kubenswrapper[4125]: I0312 13:26:10.450197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:10 crc kubenswrapper[4125]: I0312 13:26:10.450354 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:10 crc kubenswrapper[4125]: I0312 13:26:10.450374 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:10 crc kubenswrapper[4125]: I0312 13:26:10.450407 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:26:10 crc kubenswrapper[4125]: E0312 13:26:10.456894 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:10Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:26:10 crc kubenswrapper[4125]: E0312 13:26:10.895440 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:10 crc kubenswrapper[4125]: E0312 13:26:10.918203 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:11 crc kubenswrapper[4125]: I0312 13:26:11.382650 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:11Z is after 2025-06-26T12:47:18Z Mar 12 13:26:11 crc kubenswrapper[4125]: E0312 13:26:11.895051 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:12 crc kubenswrapper[4125]: I0312 13:26:12.025620 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:12 crc kubenswrapper[4125]: I0312 13:26:12.027677 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:12 crc kubenswrapper[4125]: I0312 13:26:12.028006 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:12 crc kubenswrapper[4125]: I0312 13:26:12.028049 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:12 crc kubenswrapper[4125]: I0312 13:26:12.030568 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:26:12 crc kubenswrapper[4125]: E0312 13:26:12.031473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:26:12 crc kubenswrapper[4125]: E0312 13:26:12.203084 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:26:12 crc kubenswrapper[4125]: I0312 13:26:12.381743 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:12Z is after 2025-06-26T12:47:18Z Mar 12 13:26:12 crc kubenswrapper[4125]: E0312 13:26:12.895957 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:13 crc kubenswrapper[4125]: I0312 13:26:13.280292 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:26:13 crc kubenswrapper[4125]: I0312 13:26:13.280421 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:26:13 crc kubenswrapper[4125]: W0312 13:26:13.342351 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:13Z is after 2025-06-26T12:47:18Z Mar 12 13:26:13 crc kubenswrapper[4125]: E0312 13:26:13.342512 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:13Z is after 2025-06-26T12:47:18Z Mar 12 13:26:13 crc kubenswrapper[4125]: I0312 13:26:13.382393 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:13Z is after 2025-06-26T12:47:18Z Mar 12 13:26:13 crc kubenswrapper[4125]: E0312 13:26:13.895467 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:14 crc kubenswrapper[4125]: I0312 13:26:14.383067 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:14Z is after 2025-06-26T12:47:18Z Mar 12 13:26:14 crc kubenswrapper[4125]: E0312 13:26:14.894982 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:15 crc kubenswrapper[4125]: I0312 13:26:15.382658 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:15Z is after 2025-06-26T12:47:18Z Mar 12 13:26:15 crc kubenswrapper[4125]: E0312 13:26:15.498144 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:15Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:15 crc kubenswrapper[4125]: E0312 13:26:15.895726 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:16 crc kubenswrapper[4125]: I0312 13:26:16.382243 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:16Z is after 2025-06-26T12:47:18Z Mar 12 13:26:16 crc kubenswrapper[4125]: E0312 13:26:16.895076 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:17 crc kubenswrapper[4125]: I0312 13:26:17.382273 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:17Z is after 2025-06-26T12:47:18Z Mar 12 13:26:17 crc kubenswrapper[4125]: I0312 13:26:17.458557 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:17 crc kubenswrapper[4125]: E0312 13:26:17.459046 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:17Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:26:17 crc kubenswrapper[4125]: I0312 13:26:17.461030 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:17 crc kubenswrapper[4125]: I0312 13:26:17.461137 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:17 crc kubenswrapper[4125]: I0312 13:26:17.461171 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:17 crc kubenswrapper[4125]: I0312 13:26:17.461230 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:26:17 crc kubenswrapper[4125]: E0312 13:26:17.467891 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:17Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:26:17 crc kubenswrapper[4125]: E0312 13:26:17.896039 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:18 crc kubenswrapper[4125]: I0312 13:26:18.383036 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:18Z is after 2025-06-26T12:47:18Z Mar 12 13:26:18 crc kubenswrapper[4125]: W0312 13:26:18.584125 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:18Z is after 2025-06-26T12:47:18Z Mar 12 13:26:18 crc kubenswrapper[4125]: E0312 13:26:18.584267 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:18Z is after 2025-06-26T12:47:18Z Mar 12 13:26:18 crc kubenswrapper[4125]: E0312 13:26:18.895068 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:19 crc kubenswrapper[4125]: I0312 13:26:19.386404 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z Mar 12 13:26:19 crc kubenswrapper[4125]: E0312 13:26:19.896026 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.279195 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" start-of-body= Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.279361 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.279436 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.279633 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.282292 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.282393 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.282425 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.285737 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.286533 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" gracePeriod=30 Mar 12 13:26:20 crc kubenswrapper[4125]: E0312 13:26:20.305321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.381245 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:20Z is after 2025-06-26T12:47:18Z Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.604259 4125 
logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/6.log" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.606400 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/5.log" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.612302 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" exitCode=255 Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.612495 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerDied","Data":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.612665 4125 scope.go:117] "RemoveContainer" containerID="80c164bc1d85755e8543d9bdefc137eb094a5b2d1582c40841037bd2cf4511c0" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.612995 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.614488 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.614629 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.614673 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:20 crc kubenswrapper[4125]: I0312 13:26:20.617567 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:26:20 crc kubenswrapper[4125]: E0312 13:26:20.619050 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:26:20 crc kubenswrapper[4125]: E0312 13:26:20.895932 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:20 crc kubenswrapper[4125]: E0312 13:26:20.919234 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:21 crc kubenswrapper[4125]: I0312 13:26:21.378511 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:21Z is after 2025-06-26T12:47:18Z Mar 12 13:26:21 crc kubenswrapper[4125]: I0312 13:26:21.620092 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/6.log" Mar 12 13:26:21 crc kubenswrapper[4125]: E0312 13:26:21.895044 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:22 crc kubenswrapper[4125]: E0312 13:26:22.204398 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:26:22 crc kubenswrapper[4125]: I0312 13:26:22.380140 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:22Z is after 2025-06-26T12:47:18Z Mar 12 13:26:22 crc kubenswrapper[4125]: E0312 13:26:22.896177 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:23 crc kubenswrapper[4125]: I0312 13:26:23.379599 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:23Z is after 2025-06-26T12:47:18Z Mar 12 13:26:23 crc kubenswrapper[4125]: E0312 13:26:23.895597 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:24 crc kubenswrapper[4125]: I0312 13:26:24.383500 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:24Z is after 2025-06-26T12:47:18Z Mar 12 13:26:24 crc kubenswrapper[4125]: E0312 13:26:24.467099 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:24Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:26:24 crc kubenswrapper[4125]: I0312 13:26:24.468401 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:24 crc kubenswrapper[4125]: I0312 13:26:24.470674 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:24 crc kubenswrapper[4125]: I0312 13:26:24.470784 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:24 crc kubenswrapper[4125]: I0312 13:26:24.470938 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:24 crc kubenswrapper[4125]: I0312 13:26:24.470992 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:26:24 crc kubenswrapper[4125]: E0312 13:26:24.477742 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:24Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:26:24 crc kubenswrapper[4125]: E0312 13:26:24.895568 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:25 crc kubenswrapper[4125]: I0312 13:26:25.383237 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:25Z is after 2025-06-26T12:47:18Z Mar 12 13:26:25 crc kubenswrapper[4125]: E0312 13:26:25.507399 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:25Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:25 crc kubenswrapper[4125]: E0312 13:26:25.895338 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:26 crc kubenswrapper[4125]: I0312 13:26:26.382582 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:26Z is after 2025-06-26T12:47:18Z Mar 12 13:26:26 crc kubenswrapper[4125]: E0312 13:26:26.894967 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:27 crc kubenswrapper[4125]: I0312 13:26:27.025359 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:27 crc kubenswrapper[4125]: I0312 13:26:27.027052 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:27 crc kubenswrapper[4125]: I0312 13:26:27.027080 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:27 crc kubenswrapper[4125]: I0312 13:26:27.027093 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:27 crc kubenswrapper[4125]: I0312 13:26:27.028096 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:26:27 crc kubenswrapper[4125]: E0312 13:26:27.028402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:26:27 crc kubenswrapper[4125]: I0312 13:26:27.380970 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:27Z is after 2025-06-26T12:47:18Z Mar 12 13:26:27 crc kubenswrapper[4125]: E0312 13:26:27.895970 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:28 crc kubenswrapper[4125]: I0312 13:26:28.382277 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:28Z is after 2025-06-26T12:47:18Z Mar 12 13:26:28 crc kubenswrapper[4125]: E0312 13:26:28.895011 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:29 crc kubenswrapper[4125]: I0312 13:26:29.025408 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:29 crc kubenswrapper[4125]: I0312 13:26:29.028381 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:29 crc kubenswrapper[4125]: I0312 13:26:29.028488 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:29 crc kubenswrapper[4125]: I0312 13:26:29.028517 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:29 crc kubenswrapper[4125]: I0312 13:26:29.387187 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:29Z is after 2025-06-26T12:47:18Z Mar 12 13:26:29 crc kubenswrapper[4125]: E0312 13:26:29.895120 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.382581 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:30Z is after 2025-06-26T12:47:18Z Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.472539 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.473019 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.474940 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.475042 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.475071 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:30 crc kubenswrapper[4125]: I0312 13:26:30.477977 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:26:30 crc kubenswrapper[4125]: E0312 13:26:30.479790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:26:30 crc kubenswrapper[4125]: E0312 13:26:30.895081 4125 transport.go:123] "No valid client 
certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:30 crc kubenswrapper[4125]: E0312 13:26:30.919413 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.381632 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:31Z is after 2025-06-26T12:47:18Z Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.415021 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.415178 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.415231 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.415268 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.415305 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:26:31 crc kubenswrapper[4125]: E0312 13:26:31.475610 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:31Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.478683 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.481134 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.481229 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.481260 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.481309 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:26:31 crc kubenswrapper[4125]: E0312 13:26:31.488135 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:31Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:26:31 crc kubenswrapper[4125]: I0312 13:26:31.531096 4125 
certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Mar 12 13:26:31 crc kubenswrapper[4125]: E0312 13:26:31.540406 4125 certificate_manager.go:562] kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post "https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:31Z is after 2025-06-26T12:47:18Z Mar 12 13:26:31 crc kubenswrapper[4125]: E0312 13:26:31.896141 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:32 crc kubenswrapper[4125]: E0312 13:26:32.205703 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:26:32 crc kubenswrapper[4125]: I0312 13:26:32.382610 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:32Z is after 2025-06-26T12:47:18Z Mar 12 13:26:32 crc kubenswrapper[4125]: E0312 13:26:32.895359 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:33 crc kubenswrapper[4125]: I0312 13:26:33.381740 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:33Z is after 2025-06-26T12:47:18Z Mar 12 13:26:33 crc kubenswrapper[4125]: E0312 13:26:33.895343 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:34 crc kubenswrapper[4125]: I0312 13:26:34.382365 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:34Z is after 2025-06-26T12:47:18Z Mar 12 13:26:34 crc kubenswrapper[4125]: E0312 13:26:34.896138 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:35 crc kubenswrapper[4125]: I0312 13:26:35.382569 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:35Z is after 2025-06-26T12:47:18Z Mar 12 13:26:35 crc kubenswrapper[4125]: E0312 13:26:35.516969 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:35Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:35 crc kubenswrapper[4125]: E0312 13:26:35.895754 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:36 crc kubenswrapper[4125]: I0312 13:26:36.380591 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:36Z is after 2025-06-26T12:47:18Z Mar 12 13:26:36 crc kubenswrapper[4125]: E0312 13:26:36.896050 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:37 crc kubenswrapper[4125]: I0312 13:26:37.383225 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:37Z is after 2025-06-26T12:47:18Z Mar 12 13:26:37 crc kubenswrapper[4125]: E0312 13:26:37.895043 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:38 crc kubenswrapper[4125]: I0312 13:26:38.383193 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:38Z is after 2025-06-26T12:47:18Z Mar 12 13:26:38 crc kubenswrapper[4125]: E0312 13:26:38.487718 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:38Z is after 2025-06-26T12:47:18Z" interval="7s" Mar 12 13:26:38 crc kubenswrapper[4125]: I0312 13:26:38.489378 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:38 crc kubenswrapper[4125]: I0312 13:26:38.491705 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:38 crc kubenswrapper[4125]: I0312 13:26:38.491915 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:38 crc kubenswrapper[4125]: I0312 13:26:38.491959 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:38 crc kubenswrapper[4125]: I0312 13:26:38.492141 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 12 13:26:38 crc kubenswrapper[4125]: E0312 13:26:38.498956 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:38Z is after 2025-06-26T12:47:18Z" node="crc" Mar 12 13:26:38 crc kubenswrapper[4125]: E0312 13:26:38.895263 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:39 crc kubenswrapper[4125]: I0312 13:26:39.382539 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:39Z is after 2025-06-26T12:47:18Z Mar 12 13:26:39 crc kubenswrapper[4125]: E0312 13:26:39.896170 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." 
lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:40 crc kubenswrapper[4125]: I0312 13:26:40.381667 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:40Z is after 2025-06-26T12:47:18Z Mar 12 13:26:40 crc kubenswrapper[4125]: E0312 13:26:40.895552 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:40 crc kubenswrapper[4125]: E0312 13:26:40.920526 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.025632 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.027932 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.028024 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.028057 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.030659 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.384021 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:41Z is after 2025-06-26T12:47:18Z Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.711210 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/5.log" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.717198 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"} Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.717522 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.719383 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.719523 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:26:41 crc kubenswrapper[4125]: I0312 13:26:41.719592 4125 
kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:26:41 crc kubenswrapper[4125]: W0312 13:26:41.881162 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:41Z is after 2025-06-26T12:47:18Z Mar 12 13:26:41 crc kubenswrapper[4125]: E0312 13:26:41.881250 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:41Z is after 2025-06-26T12:47:18Z Mar 12 13:26:41 crc kubenswrapper[4125]: E0312 13:26:41.895666 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s" Mar 12 13:26:42 crc kubenswrapper[4125]: E0312 13:26:42.206549 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.384372 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:47:18Z Mar 12 13:26:42 crc kubenswrapper[4125]: W0312 13:26:42.395472 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:47:18Z Mar 12 13:26:42 crc kubenswrapper[4125]: E0312 13:26:42.395791 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:47:18Z Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.725180 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/6.log" Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.727682 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/5.log" Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.736269 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" exitCode=255 Mar 12 13:26:42 crc 
kubenswrapper[4125]: I0312 13:26:42.736377 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerDied","Data":"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"}
Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.736535 4125 scope.go:117] "RemoveContainer" containerID="13a401c1adcabda0f01a7b331e458e7498f356a5a9c92517eb7b770bb4fe14f2"
Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.736781 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.743492 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.743584 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.743612 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:42 crc kubenswrapper[4125]: I0312 13:26:42.747111 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:26:42 crc kubenswrapper[4125]: E0312 13:26:42.747952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:26:42 crc kubenswrapper[4125]: E0312 13:26:42.895732 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:43 crc kubenswrapper[4125]: I0312 13:26:43.382532 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:43Z is after 2025-06-26T12:47:18Z
Mar 12 13:26:43 crc kubenswrapper[4125]: I0312 13:26:43.744664 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/6.log"
Mar 12 13:26:43 crc kubenswrapper[4125]: E0312 13:26:43.895112 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.026613 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.029233 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.029802 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.030033 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.034784 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:26:44 crc kubenswrapper[4125]: E0312 13:26:44.036419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.184363 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.185101 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.187401 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.187679 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.187715 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.190717 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:26:44 crc kubenswrapper[4125]: E0312 13:26:44.191567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:26:44 crc kubenswrapper[4125]: I0312 13:26:44.381208 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:44Z is after 2025-06-26T12:47:18Z
Mar 12 13:26:44 crc kubenswrapper[4125]: E0312 13:26:44.896127 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:45 crc kubenswrapper[4125]: I0312 13:26:45.380098 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:45Z is after 2025-06-26T12:47:18Z
Mar 12 13:26:45 crc kubenswrapper[4125]: E0312 13:26:45.497769 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:45Z is after 2025-06-26T12:47:18Z" interval="7s"
Mar 12 13:26:45 crc kubenswrapper[4125]: I0312 13:26:45.500016 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:45 crc kubenswrapper[4125]: I0312 13:26:45.502198 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:45 crc kubenswrapper[4125]: I0312 13:26:45.502324 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:45 crc kubenswrapper[4125]: I0312 13:26:45.502363 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:45 crc kubenswrapper[4125]: I0312 13:26:45.502414 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:26:45 crc kubenswrapper[4125]: E0312 13:26:45.510478 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:45Z is after 2025-06-26T12:47:18Z" node="crc"
Mar 12 13:26:45 crc kubenswrapper[4125]: E0312 13:26:45.526431 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:45Z is after 2025-06-26T12:47:18Z" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:45 crc kubenswrapper[4125]: E0312 13:26:45.895135 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:46 crc kubenswrapper[4125]: I0312 13:26:46.380339 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:46Z is after 2025-06-26T12:47:18Z
Mar 12 13:26:46 crc kubenswrapper[4125]: E0312 13:26:46.895784 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:47 crc kubenswrapper[4125]: I0312 13:26:47.382685 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:47Z is after 2025-06-26T12:47:18Z
Mar 12 13:26:47 crc kubenswrapper[4125]: E0312 13:26:47.896001 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.385549 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:48Z is after 2025-06-26T12:47:18Z
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.625767 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.626296 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.629282 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.630196 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.630237 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:48 crc kubenswrapper[4125]: I0312 13:26:48.633628 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:26:48 crc kubenswrapper[4125]: E0312 13:26:48.634623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:26:48 crc kubenswrapper[4125]: E0312 13:26:48.895058 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:49 crc kubenswrapper[4125]: I0312 13:26:49.382720 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:49 crc kubenswrapper[4125]: E0312 13:26:49.895081 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:50 crc kubenswrapper[4125]: I0312 13:26:50.386705 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:50 crc kubenswrapper[4125]: E0312 13:26:50.895140 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:50 crc kubenswrapper[4125]: E0312 13:26:50.921175 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:51 crc kubenswrapper[4125]: W0312 13:26:51.146080 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: nodes "crc" is forbidden: User "system:anonymous" cannot list resource "nodes" in API group "" at the cluster scope
Mar 12 13:26:51 crc kubenswrapper[4125]: E0312 13:26:51.146155 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: nodes "crc" is forbidden: User "system:anonymous" cannot list resource "nodes" in API group "" at the cluster scope
Mar 12 13:26:51 crc kubenswrapper[4125]: I0312 13:26:51.386092 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:51 crc kubenswrapper[4125]: E0312 13:26:51.895647 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:52 crc kubenswrapper[4125]: E0312 13:26:52.207009 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:26:52 crc kubenswrapper[4125]: I0312 13:26:52.388749 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:52 crc kubenswrapper[4125]: I0312 13:26:52.511038 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:52 crc kubenswrapper[4125]: E0312 13:26:52.511167 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Mar 12 13:26:52 crc kubenswrapper[4125]: I0312 13:26:52.513265 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:52 crc kubenswrapper[4125]: I0312 13:26:52.513324 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:52 crc kubenswrapper[4125]: I0312 13:26:52.513351 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:52 crc kubenswrapper[4125]: I0312 13:26:52.513402 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:26:52 crc kubenswrapper[4125]: E0312 13:26:52.525191 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Mar 12 13:26:52 crc kubenswrapper[4125]: E0312 13:26:52.895654 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:53 crc kubenswrapper[4125]: I0312 13:26:53.388100 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:53 crc kubenswrapper[4125]: E0312 13:26:53.895654 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:54 crc kubenswrapper[4125]: I0312 13:26:54.382626 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:54 crc kubenswrapper[4125]: E0312 13:26:54.895590 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:55 crc kubenswrapper[4125]: I0312 13:26:55.385787 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.535370 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.542539 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.850796388 +0000 UTC m=+2.174182487,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.545245 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.851587302 +0000 UTC m=+2.174993972,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.555576 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.851701946 +0000 UTC m=+2.175088075,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.564145 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:31.911308131 +0000 UTC m=+2.234693990,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.574495 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:31.911331802 +0000 UTC m=+2.234717581,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.583978 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:31.911503117 +0000 UTC m=+2.234889016,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.590077 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0f9a174e7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeAllocatableEnforced,Message:Updated Node Allocatable limit across pods,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.962330343 +0000 UTC m=+2.285716312,LastTimestamp:2026-03-12 13:20:31.962330343 +0000 UTC m=+2.285716312,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.601319 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:32.143612485 +0000 UTC m=+2.466998444,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.608484 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:32.143635465 +0000 UTC m=+2.467021364,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.615066 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:32.143646766 +0000 UTC m=+2.467032545,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.633985 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:32.148002079 +0000 UTC m=+2.471387878,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.641111 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:32.14801535 +0000 UTC m=+2.471401249,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.651479 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:32.14802619 +0000 UTC m=+2.471411999,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.653445 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:32.148946258 +0000 UTC m=+2.472332177,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.663903 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:32.148965018 +0000 UTC m=+2.472350807,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.670309 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:32.148975599 +0000 UTC m=+2.472361408,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.677574 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:32.15062026 +0000 UTC m=+2.474006199,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.684671 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:32.15063694 +0000 UTC m=+2.474022719,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.691557 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:32.150647741 +0000 UTC m=+2.474033540,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.698342 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea26ffb6\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea26ffb6 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.70264671 +0000 UTC m=+2.026032749,LastTimestamp:2026-03-12 13:20:32.151864109 +0000 UTC m=+2.475249898,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.705223 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea31b8f5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea31b8f5 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703349493 +0000 UTC m=+2.026735402,LastTimestamp:2026-03-12 13:20:32.151879311 +0000 UTC m=+2.475265218,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.711720 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"crc.189c1aa0ea32148f\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.189c1aa0ea32148f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:31.703372943 +0000 UTC m=+2.026758862,LastTimestamp:2026-03-12 13:20:32.15190075 +0000 UTC m=+2.475286679,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.722749 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa122884f45 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:32.648548165 +0000 UTC m=+2.971933964,LastTimestamp:2026-03-12 13:20:32.648548165 +0000 UTC m=+2.971933964,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.732360 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa123bf094f openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:32.668911951 +0000 UTC m=+2.992297800,LastTimestamp:2026-03-12 13:20:32.668911951 +0000 UTC m=+2.992297800,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.740114 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa124e0914a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:32.687886666 +0000 UTC m=+3.011272515,LastTimestamp:2026-03-12 13:20:32.687886666 +0000 UTC m=+3.011272515,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.747374 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.189c1aa1266744f3 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d3ae206906481b4831fd849b559269c8,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:32.713491699 +0000 UTC m=+3.036877598,LastTimestamp:2026-03-12 13:20:32.713491699 +0000 UTC m=+3.036877598,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.755540 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa127b877de openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:32.735590366 +0000 UTC m=+3.058997095,LastTimestamp:2026-03-12 13:20:32.735590366 +0000 UTC m=+3.058997095,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.764269 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa195e50e9a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.584006298 +0000 UTC m=+4.907392087,LastTimestamp:2026-03-12 13:20:34.584006298 +0000 UTC m=+4.907392087,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.777292 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa1a15b702d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.776313901 +0000 UTC m=+5.099699800,LastTimestamp:2026-03-12 13:20:34.776313901 +0000 UTC m=+5.099699800,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.785990 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.189c1aa1a363b238 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d3ae206906481b4831fd849b559269c8,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.810409528 +0000 UTC m=+5.133795527,LastTimestamp:2026-03-12 13:20:34.810409528 +0000 UTC m=+5.133795527,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.793596 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1a4fe68d6 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Created,Message:Created container kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.837326038 +0000 UTC m=+5.160711937,LastTimestamp:2026-03-12 13:20:34.837326038 +0000 UTC m=+5.160711937,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.803118 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa1a52bc965 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.840299877 +0000 UTC m=+5.163685759,LastTimestamp:2026-03-12 13:20:34.840299877 +0000 UTC m=+5.163685759,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.811517 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa1a55e89b7 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Created,Message:Created container wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.843625911 +0000 UTC m=+5.167011830,LastTimestamp:2026-03-12 13:20:34.843625911 +0000 UTC m=+5.167011830,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.818015 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa1a6430eee openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.858602222 +0000 UTC m=+5.181988111,LastTimestamp:2026-03-12 13:20:34.858602222 +0000 UTC m=+5.181988111,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.824948 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1a7ba9efc openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Started,Message:Started container kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.8832151 +0000 UTC m=+5.206600989,LastTimestamp:2026-03-12 13:20:34.8832151 +0000 UTC m=+5.206600989,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.832966 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1a7cd023d openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.884420157 +0000 UTC m=+5.207806106,LastTimestamp:2026-03-12 13:20:34.884420157 +0000 UTC m=+5.207806106,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.840752 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa1abde0382 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Started,Message:Started container wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.952643458 +0000 UTC m=+5.276029367,LastTimestamp:2026-03-12 13:20:34.952643458 +0000 UTC m=+5.276029367,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.852501 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa1b3b4a439 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.084149817 +0000 UTC m=+5.407535706,LastTimestamp:2026-03-12 13:20:35.084149817 +0000 UTC m=+5.407535706,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.863602 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa1b476fe4b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.096886859 +0000 UTC m=+5.420272898,LastTimestamp:2026-03-12 13:20:35.096886859 +0000 UTC m=+5.420272898,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.871426 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa1b5095638 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.106477624 +0000 UTC m=+5.429863413,LastTimestamp:2026-03-12 13:20:35.106477624 +0000 UTC m=+5.429863413,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.878451 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1ba86f0bd openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Created,Message:Created container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.198595261 +0000 UTC m=+5.521981170,LastTimestamp:2026-03-12 13:20:35.198595261 +0000 UTC m=+5.521981170,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.885775 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1c2a4b232 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Started,Message:Started container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.334763058 +0000 UTC m=+5.658148857,LastTimestamp:2026-03-12 13:20:35.334763058 +0000 UTC m=+5.658148857,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.892113 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1c2c791ff openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.337048575 +0000 UTC m=+5.660434464,LastTimestamp:2026-03-12 13:20:35.337048575 +0000 UTC m=+5.660434464,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.897943 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.899085 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.189c1aa1d51a6500 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d3ae206906481b4831fd849b559269c8,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.644466432 +0000 UTC m=+5.967852481,LastTimestamp:2026-03-12 13:20:35.644466432 +0000 UTC m=+5.967852481,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.906641 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa1f1739d50 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Created,Message:Created container kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.1200756 +0000 UTC m=+6.443461609,LastTimestamp:2026-03-12 13:20:36.1200756 +0000 UTC m=+6.443461609,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.915473 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.189c1aa1f19153d7 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d3ae206906481b4831fd849b559269c8,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.122022871 +0000 UTC m=+6.445408760,LastTimestamp:2026-03-12 13:20:36.122022871 +0000 UTC m=+6.445408760,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.923127 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa1f1b01f27 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Created,Message:Created container kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.124040999 +0000 UTC m=+6.447426818,LastTimestamp:2026-03-12 13:20:36.124040999 +0000 UTC m=+6.447426818,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.929174 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa1f76092cf openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Started,Message:Started container kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.219491023 +0000 UTC m=+6.542876812,LastTimestamp:2026-03-12 13:20:36.219491023 +0000 UTC m=+6.542876812,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.936209 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa1f784e741 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.221871937 +0000 UTC m=+6.545257826,LastTimestamp:2026-03-12 13:20:36.221871937 +0000 UTC m=+6.545257826,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.942135 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa2033960ba openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] []
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Created,Message:Created container kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.41824889 +0000 UTC m=+6.741634939,LastTimestamp:2026-03-12 13:20:36.41824889 +0000 UTC m=+6.741634939,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.953737 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa2035b95f3 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Created,Message:Created container etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.420490739 +0000 UTC m=+6.743876918,LastTimestamp:2026-03-12 13:20:36.420490739 +0000 UTC m=+6.743876918,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.962978 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa206d52993 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Started,Message:Started container kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.478790035 +0000 UTC m=+6.802175824,LastTimestamp:2026-03-12 13:20:36.478790035 +0000 UTC m=+6.802175824,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.970441 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa206f50a69 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\" already present on 
machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:36.480879209 +0000 UTC m=+6.804265258,LastTimestamp:2026-03-12 13:20:36.480879209 +0000 UTC m=+6.804265258,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.979657 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa2281b66af openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Started,Message:Started container etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.037041327 +0000 UTC m=+7.360427356,LastTimestamp:2026-03-12 13:20:37.037041327 +0000 UTC m=+7.360427356,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.988602 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa22de7d19c openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Started,Message:Started container kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.134324124 +0000 UTC m=+7.457710033,LastTimestamp:2026-03-12 13:20:37.134324124 +0000 UTC m=+7.457710033,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:55 crc kubenswrapper[4125]: E0312 13:26:55.997550 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa22df7032e openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.135319854 +0000 UTC m=+7.458705753,LastTimestamp:2026-03-12 13:20:37.135319854 +0000 UTC 
m=+7.458705753,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.008396 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa22f0434c9 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.152961737 +0000 UTC m=+7.476347616,LastTimestamp:2026-03-12 13:20:37.152961737 +0000 UTC m=+7.476347616,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.016030 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa23f61b73e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Created,Message:Created container kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.427525438 +0000 UTC m=+7.750911327,LastTimestamp:2026-03-12 13:20:37.427525438 +0000 UTC m=+7.750911327,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.023751 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa241ed3104 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Created,Message:Created container kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.470220548 +0000 UTC m=+7.793606557,LastTimestamp:2026-03-12 13:20:37.470220548 +0000 UTC m=+7.793606557,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.033630 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create 
resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.189c1aa2455e3d91 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d3ae206906481b4831fd849b559269c8,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Created,Message:Created container kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.527960977 +0000 UTC m=+7.851347027,LastTimestamp:2026-03-12 13:20:37.527960977 +0000 UTC m=+7.851347027,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.042242 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa25687c32a openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Started,Message:Started container kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.815894826 +0000 UTC m=+8.139280865,LastTimestamp:2026-03-12 13:20:37.815894826 +0000 UTC m=+8.139280865,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.050231 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa256f077ff openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.822756863 +0000 UTC m=+8.146142752,LastTimestamp:2026-03-12 13:20:37.822756863 +0000 UTC m=+8.146142752,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.057565 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.189c1aa25b2719ae openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d3ae206906481b4831fd849b559269c8,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Started,Message:Started container kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:37.893446062 +0000 UTC m=+8.216831881,LastTimestamp:2026-03-12 13:20:37.893446062 +0000 UTC m=+8.216831881,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.063800 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa2650f279d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Started,Message:Started container kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:38.059648925 +0000 UTC m=+8.383034814,LastTimestamp:2026-03-12 13:20:38.059648925 +0000 UTC m=+8.383034814,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.069329 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa26564b35b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:38.065255259 +0000 UTC m=+8.388641158,LastTimestamp:2026-03-12 13:20:38.065255259 +0000 UTC m=+8.388641158,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.075233 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa26e8fcb0e openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Created,Message:Created container 
etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:38.219074318 +0000 UTC m=+8.542460227,LastTimestamp:2026-03-12 13:20:38.219074318 +0000 UTC m=+8.542460227,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.078271 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa278c252c1 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Created,Message:Created container kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:38.390158017 +0000 UTC m=+8.713544046,LastTimestamp:2026-03-12 13:20:38.390158017 +0000 UTC m=+8.713544046,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.082378 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa2874ab285 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Started,Message:Started container kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:38.633976453 +0000 UTC m=+8.957362442,LastTimestamp:2026-03-12 13:20:38.633976453 +0000 UTC m=+8.957362442,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.085706 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa29fe14dd5 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Started,Message:Started container etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.046499797 +0000 UTC m=+9.369885686,LastTimestamp:2026-03-12 13:20:39.046499797 +0000 UTC m=+9.369885686,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.090914 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa2aaa98648 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Created,Message:Created container kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.227393608 +0000 UTC m=+9.550779687,LastTimestamp:2026-03-12 13:20:39.227393608 +0000 UTC m=+9.550779687,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.098659 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa2ab355d1d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.236558109 +0000 UTC m=+9.559943919,LastTimestamp:2026-03-12 13:20:39.236558109 +0000 UTC m=+9.559943919,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.104494 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa2ad6ce40d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Created,Message:Created container kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.273751565 +0000 UTC m=+9.597137555,LastTimestamp:2026-03-12 13:20:39.273751565 +0000 UTC m=+9.597137555,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.113425 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in 
the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.189c1aa2b2927121 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:631cdb37fbb54e809ecc5e719aebd371,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Started,Message:Started container kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.360098593 +0000 UTC m=+9.683484502,LastTimestamp:2026-03-12 13:20:39.360098593 +0000 UTC m=+9.683484502,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.125189 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa2b303c624 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Started,Message:Started container kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.367525924 +0000 UTC m=+9.690911713,LastTimestamp:2026-03-12 13:20:39.367525924 +0000 UTC m=+9.690911713,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.140000 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa2b312db8d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:39.368514445 +0000 UTC m=+9.691900244,LastTimestamp:2026-03-12 13:20:39.368514445 +0000 UTC m=+9.691900244,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.150396 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa2dd384ec9 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Created,Message:Created container etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:40.075611849 +0000 UTC m=+10.398997638,LastTimestamp:2026-03-12 13:20:40.075611849 +0000 UTC m=+10.398997638,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.159418 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa2f7936bb4 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Started,Message:Started container etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:40.517790644 +0000 UTC m=+10.841176663,LastTimestamp:2026-03-12 13:20:40.517790644 +0000 UTC m=+10.841176663,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.167187 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa2f7db38f8 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:40.522496248 +0000 UTC m=+10.845882067,LastTimestamp:2026-03-12 13:20:40.522496248 +0000 UTC m=+10.845882067,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.174925 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa312fb7214 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Created,Message:Created container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:40.977592852 +0000 UTC m=+11.300978791,LastTimestamp:2026-03-12 13:20:40.977592852 +0000 UTC m=+11.300978791,Count:1,Type:Normal,EventTime:0001-01-01 
00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.190179 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa321e1a01c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Started,Message:Started container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:41.22755894 +0000 UTC m=+11.550944909,LastTimestamp:2026-03-12 13:20:41.22755894 +0000 UTC m=+11.550944909,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.199602 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa321fc0a77 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:41.229290103 +0000 UTC m=+11.552675892,LastTimestamp:2026-03-12 13:20:41.229290103 +0000 UTC m=+11.552675892,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.207346 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa332af1cee openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Created,Message:Created container etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:41.50946123 +0000 UTC m=+11.832847159,LastTimestamp:2026-03-12 13:20:41.50946123 +0000 UTC m=+11.832847159,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.217535 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" 
event="&Event{ObjectMeta:{etcd-crc.189c1aa33b13c8b8 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Started,Message:Started container etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:41.650276536 +0000 UTC m=+11.973662515,LastTimestamp:2026-03-12 13:20:41.650276536 +0000 UTC m=+11.973662515,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.223740 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa33b359e52 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:41.652493906 +0000 UTC m=+11.975879815,LastTimestamp:2026-03-12 13:20:41.652493906 +0000 UTC m=+11.975879815,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.231247 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa33d15931d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:41.683948317 +0000 UTC m=+12.007334226,LastTimestamp:2026-03-12 13:20:41.683948317 +0000 UTC m=+12.007334226,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.237799 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa3552fd872 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container 
kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:42.088323186 +0000 UTC m=+12.411709075,LastTimestamp:2026-03-12 13:20:42.088323186 +0000 UTC m=+12.411709075,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.245033 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa36913e8b6 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Created,Message:Created container etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:42.422036662 +0000 UTC m=+12.745423051,LastTimestamp:2026-03-12 13:20:42.422036662 +0000 UTC m=+12.745423051,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.252331 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa379c8359a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Started,Message:Started container etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:42.702288282 +0000 UTC m=+13.025674941,LastTimestamp:2026-03-12 13:20:42.702288282 +0000 UTC m=+13.025674941,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.259783 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa37a015a73 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:42.706033267 +0000 UTC m=+13.029420006,LastTimestamp:2026-03-12 13:20:42.706033267 +0000 UTC m=+13.029420006,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.267488 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User 
\"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=< Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-controller-manager-crc.189c1aa39c39cab7 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Mar 12 13:26:56 crc kubenswrapper[4125]: body: Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:43.280157367 +0000 UTC m=+13.603543366,LastTimestamp:2026-03-12 13:20:43.280157367 +0000 UTC m=+13.603543366,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Mar 12 13:26:56 crc kubenswrapper[4125]: > Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.274032 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa39c3da1ac openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:43.280409004 +0000 UTC m=+13.603794853,LastTimestamp:2026-03-12 13:20:43.280409004 +0000 UTC m=+13.603794853,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.280548 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa39dae63cd openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Created,Message:Created container etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:43.304575949 +0000 UTC m=+13.627961868,LastTimestamp:2026-03-12 13:20:43.304575949 +0000 UTC m=+13.627961868,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.287610 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource 
\"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.189c1aa3a4163d0d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:b2a6a3b2ca08062d24afa4c01aaf9e4f,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Started,Message:Started container etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:43.412045069 +0000 UTC m=+13.735431488,LastTimestamp:2026-03-12 13:20:43.412045069 +0000 UTC m=+13.735431488,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.291439 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f052153b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Mar 12 13:26:56 crc kubenswrapper[4125]: body:
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.280970043 +0000 UTC m=+23.604356192,LastTimestamp:2026-03-12 13:20:53.280970043 +0000 UTC m=+23.604356192,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.297892 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f057c77b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.281343355 +0000 UTC m=+23.604730052,LastTimestamp:2026-03-12 13:20:53.281343355 +0000 UTC m=+23.604730052,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.303606 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-apiserver-crc.189c1aa6479e2518 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Mar 12 13:26:56 crc kubenswrapper[4125]: body:
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:54.745572632 +0000 UTC m=+25.068958781,LastTimestamp:2026-03-12 13:20:54.745572632 +0000 UTC m=+25.068958781,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.310306 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa6479f3b3a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:54.745643834 +0000 UTC m=+25.069029783,LastTimestamp:2026-03-12 13:20:54.745643834 +0000 UTC m=+25.069029783,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.317572 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-apiserver-crc.189c1aa752fcc93a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403
Mar 12 13:26:56 crc kubenswrapper[4125]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
Mar 12 13:26:56 crc kubenswrapper[4125]: 
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:59.231291706 +0000 UTC m=+29.554677765,LastTimestamp:2026-03-12 13:20:59.231291706 +0000 UTC m=+29.554677765,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.324781 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa752fe5a05 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:59.231394309 +0000 UTC m=+29.554780208,LastTimestamp:2026-03-12 13:20:59.231394309 +0000 UTC m=+29.554780208,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.330632 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.189c1aa752fcc93a\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-apiserver-crc.189c1aa752fcc93a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403
Mar 12 13:26:56 crc kubenswrapper[4125]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
Mar 12 13:26:56 crc kubenswrapper[4125]: 
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:59.231291706 +0000 UTC m=+29.554677765,LastTimestamp:2026-03-12 13:20:59.24372461 +0000 UTC m=+29.567110789,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.340797 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.189c1aa752fe5a05\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.189c1aa752fe5a05 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:59.231394309 +0000 UTC m=+29.554780208,LastTimestamp:2026-03-12 13:20:59.243983808 +0000 UTC m=+29.567369757,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.350767 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-apiserver-crc.189c1aa76f321f20 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:53c1db1508241fbac1bedf9130341ffe,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:17697/healthz": read tcp 192.168.126.11:47506->192.168.126.11:17697: read: connection reset by peer
Mar 12 13:26:56 crc kubenswrapper[4125]: body:
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:59.704549152 +0000 UTC m=+30.027935121,LastTimestamp:2026-03-12 13:20:59.704549152 +0000 UTC m=+30.027935121,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.357548 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa5f052153b\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f052153b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Mar 12 13:26:56 crc kubenswrapper[4125]: body:
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.280970043 +0000 UTC m=+23.604356192,LastTimestamp:2026-03-12 13:21:03.279622202 +0000 UTC m=+33.603008311,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.365648 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa5f057c77b\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f057c77b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.281343355 +0000 UTC m=+23.604730052,LastTimestamp:2026-03-12 13:21:03.279679213 +0000 UTC m=+33.603065152,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.372603 4125 event.go:346] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa8449c5dd6 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Killing,Message:Container cluster-policy-controller failed startup probe, will be restarted,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:21:03.28505903 +0000 UTC m=+33.608445239,LastTimestamp:2026-03-12 13:21:03.28505903 +0000 UTC m=+33.608445239,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.379127 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa1a7cd023d\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1a7cd023d openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:34.884420157 +0000 UTC m=+5.207806106,LastTimestamp:2026-03-12 13:21:03.540141147 +0000 UTC m=+33.863527446,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: I0312 13:26:56.379716 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
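The repeated event.go:346 "Server rejected event (will not retry!)" errors above are RBAC denials, not event bugs: the kubelet's client certificate has expired, so its requests authenticate as system:anonymous, which may neither create nor patch Events. A minimal sketch of how a component records events with client-go (illustrative wiring under that assumption, not the kubelet's exact code):

    package lognotes

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/scheme"
        typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/record"
    )

    // newRecorder wires an event recorder the way most Kubernetes components do.
    // Whatever credentials cfg carries are what the API server sees: with an
    // expired client certificate the create/patch of each Event arrives as
    // system:anonymous and is rejected exactly as in the log above.
    func newRecorder(cfg *rest.Config) (record.EventRecorder, error) {
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            return nil, err
        }
        bc := record.NewBroadcaster()
        bc.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: cs.CoreV1().Events("")})
        return bc.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "kubelet", Host: "crc"}), nil
    }

The "(will not retry!)" matters: event delivery is best-effort, so these failures are logged and dropped rather than blocking the kubelet's main loops.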
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.382190 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa1ba86f0bd\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1ba86f0bd openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Created,Message:Created container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.198595261 +0000 UTC m=+5.521981170,LastTimestamp:2026-03-12 13:21:03.937855882 +0000 UTC m=+34.261241891,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.386359 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa1c2a4b232\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa1c2a4b232 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Started,Message:Started container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:35.334763058 +0000 UTC m=+5.658148857,LastTimestamp:2026-03-12 13:21:03.974962794 +0000 UTC m=+34.298348723,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.395302 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa5f052153b\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f052153b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Mar 12 13:26:56 crc kubenswrapper[4125]: body:
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.280970043 +0000 UTC m=+23.604356192,LastTimestamp:2026-03-12 13:21:13.280768219 +0000 UTC m=+43.604155008,Count:3,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.400662 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa5f057c77b\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f057c77b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.281343355 +0000 UTC m=+23.604730052,LastTimestamp:2026-03-12 13:21:13.281038657 +0000 UTC m=+43.604425116,Count:3,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.410412 4125 event.go:346] "Server rejected event (will not retry!)" err="events \"kube-controller-manager-crc.189c1aa5f052153b\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=<
Mar 12 13:26:56 crc kubenswrapper[4125]: &Event{ObjectMeta:{kube-controller-manager-crc.189c1aa5f052153b openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:2eb2b200bca0d10cf0fe16fb7c0caf80,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://192.168.126.11:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Mar 12 13:26:56 crc kubenswrapper[4125]: body:
Mar 12 13:26:56 crc kubenswrapper[4125]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:20:53.280970043 +0000 UTC m=+23.604356192,LastTimestamp:2026-03-12 13:21:23.27979731 +0000 UTC m=+53.603184109,Count:4,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Mar 12 13:26:56 crc kubenswrapper[4125]: >
Mar 12 13:26:56 crc kubenswrapper[4125]: E0312 13:26:56.895731 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
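The ProbeError/Unhealthy pairs show both failure modes of an HTTP startup probe: a transport error (Client.Timeout, connection reset) and a non-2xx/3xx response, here the 403 served to an anonymous /healthz request. A simplified sketch of the success rule (an assumption-level reconstruction; the kubelet's real prober also handles redirects, custom headers, and response-body capture):

    package lognotes

    import "net/http"

    // probeHTTP mirrors the basic HTTP probe rule: 2xx and 3xx count as
    // success, anything else fails. A 403 is a response, not an error, so
    // the anonymous /healthz request above fails the probe cleanly.
    func probeHTTP(client *http.Client, url string) (healthy bool, err error) {
        resp, err := client.Get(url)
        if err != nil {
            // e.g. "Client.Timeout exceeded" or "connection reset by peer"
            return false, err
        }
        defer resp.Body.Close()
        return resp.StatusCode >= 200 && resp.StatusCode < 400, nil
    }

A failing startup probe is what triggers the Killing event above ("failed startup probe, will be restarted").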
Mar 12 13:26:57 crc kubenswrapper[4125]: I0312 13:26:57.386998 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:57 crc kubenswrapper[4125]: E0312 13:26:57.896076 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:58 crc kubenswrapper[4125]: I0312 13:26:58.386802 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:58 crc kubenswrapper[4125]: E0312 13:26:58.896038 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.025267 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.027678 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.027794 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.028012 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.030976 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:26:59 crc kubenswrapper[4125]: E0312 13:26:59.032608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.386004 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:26:59 crc kubenswrapper[4125]: E0312 13:26:59.520672 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.526373 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.528370 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.528469 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.528500 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:26:59 crc kubenswrapper[4125]: I0312 13:26:59.528544 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:26:59 crc kubenswrapper[4125]: E0312 13:26:59.538224 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Mar 12 13:26:59 crc kubenswrapper[4125]: E0312 13:26:59.895487 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:00 crc kubenswrapper[4125]: I0312 13:27:00.389668 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:00 crc kubenswrapper[4125]: W0312 13:27:00.529251 4125 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: services is forbidden: User "system:anonymous" cannot list resource "services" in API group "" at the cluster scope
Mar 12 13:27:00 crc kubenswrapper[4125]: E0312 13:27:00.530482 4125 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:anonymous" cannot list resource "services" in API group "" at the cluster scope
Mar 12 13:27:00 crc kubenswrapper[4125]: E0312 13:27:00.895392 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:00 crc kubenswrapper[4125]: E0312 13:27:00.921746 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
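The register/deny loop ("Attempting to register node" followed by "nodes is forbidden") is the kubelet retrying its initial Node create while still anonymous; the parallel "Failed to ensure lease exists" errors are the same credential problem hitting the node heartbeat Lease in kube-node-lease. A sketch of the create call (hypothetical helper; the real kubelet also handles the AlreadyExists path, which surfaces later in this log as "Node was previously registered"):

    package lognotes

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // registerNode attempts the Node create the kubelet keeps retrying.
    // As system:anonymous this returns a Forbidden error, as logged above.
    func registerNode(ctx context.Context, cs kubernetes.Interface) error {
        node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "crc"}}
        _, err := cs.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
        return err
    }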
Mar 12 13:27:01 crc kubenswrapper[4125]: I0312 13:27:01.025219 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:01 crc kubenswrapper[4125]: I0312 13:27:01.029582 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:01 crc kubenswrapper[4125]: I0312 13:27:01.029646 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:01 crc kubenswrapper[4125]: I0312 13:27:01.029674 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:01 crc kubenswrapper[4125]: I0312 13:27:01.033234 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:27:01 crc kubenswrapper[4125]: E0312 13:27:01.034710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:27:01 crc kubenswrapper[4125]: I0312 13:27:01.387500 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:01 crc kubenswrapper[4125]: E0312 13:27:01.895088 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:02 crc kubenswrapper[4125]: E0312 13:27:02.207497 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:27:02 crc kubenswrapper[4125]: I0312 13:27:02.388453 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:02 crc kubenswrapper[4125]: E0312 13:27:02.895412 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:03 crc kubenswrapper[4125]: I0312 13:27:03.387268 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:03 crc kubenswrapper[4125]: I0312 13:27:03.532552 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Mar 12 13:27:03 crc kubenswrapper[4125]: I0312 13:27:03.581050 4125 reflector.go:351] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:27:03 crc kubenswrapper[4125]: E0312 13:27:03.895551 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:04 crc kubenswrapper[4125]: I0312 13:27:04.388502 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:04 crc kubenswrapper[4125]: E0312 13:27:04.896098 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:05 crc kubenswrapper[4125]: I0312 13:27:05.388563 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:05 crc kubenswrapper[4125]: E0312 13:27:05.895620 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
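"Rotating certificates" marks the start of the recovery path: the kubelet generates a new key, posts a CertificateSigningRequest, and watches the CSR cache (the *v1.CertificateSigningRequest reflector above) until the request is approved and a certificate is issued. A sketch for inspecting such a CSR (hypothetical helper; the name csr-rgmm9 appears later in this log):

    package lognotes

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // csrStatus prints the approval conditions and whether a certificate has
    // been issued yet. Approved with an empty Status.Certificate matches the
    // "approved, waiting to be issued" state logged at 13:27:07 below.
    func csrStatus(ctx context.Context, cs kubernetes.Interface, name string) error {
        csr, err := cs.CertificatesV1().CertificateSigningRequests().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        for _, cond := range csr.Status.Conditions {
            fmt.Printf("%s: %s=%s (%s)\n", name, cond.Type, cond.Status, cond.Reason)
        }
        fmt.Printf("issued certificate: %d bytes\n", len(csr.Status.Certificate))
        return nil
    }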
Mar 12 13:27:06 crc kubenswrapper[4125]: I0312 13:27:06.387007 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:06 crc kubenswrapper[4125]: E0312 13:27:06.536103 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Mar 12 13:27:06 crc kubenswrapper[4125]: I0312 13:27:06.539126 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:06 crc kubenswrapper[4125]: I0312 13:27:06.541181 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:06 crc kubenswrapper[4125]: I0312 13:27:06.541279 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:06 crc kubenswrapper[4125]: I0312 13:27:06.541314 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:06 crc kubenswrapper[4125]: I0312 13:27:06.541944 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:27:06 crc kubenswrapper[4125]: E0312 13:27:06.554315 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Mar 12 13:27:06 crc kubenswrapper[4125]: E0312 13:27:06.894800 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:07 crc kubenswrapper[4125]: I0312 13:27:07.384583 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:07 crc kubenswrapper[4125]: I0312 13:27:07.499147 4125 csr.go:261] certificate signing request csr-rgmm9 is approved, waiting to be issued
Mar 12 13:27:07 crc kubenswrapper[4125]: E0312 13:27:07.895667 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:08 crc kubenswrapper[4125]: I0312 13:27:08.386691 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:08 crc kubenswrapper[4125]: E0312 13:27:08.896083 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:09 crc kubenswrapper[4125]: I0312 13:27:09.386423 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:09 crc kubenswrapper[4125]: E0312 13:27:09.894992 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:10 crc kubenswrapper[4125]: I0312 13:27:10.388416 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:10 crc kubenswrapper[4125]: E0312 13:27:10.896972 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:10 crc kubenswrapper[4125]: E0312 13:27:10.922601 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:11 crc kubenswrapper[4125]: I0312 13:27:11.025297 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:11 crc kubenswrapper[4125]: I0312 13:27:11.028070 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:11 crc kubenswrapper[4125]: I0312 13:27:11.028390 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:11 crc kubenswrapper[4125]: I0312 13:27:11.028584 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:11 crc kubenswrapper[4125]: I0312 13:27:11.387302 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:11 crc kubenswrapper[4125]: E0312 13:27:11.896048 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
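The once-per-second csi_plugin.go:880 lines are the kubelet polling for its own CSINode object, which it cannot read anonymously; CSI driver registration stays blocked until the credential problem clears. A sketch of that lookup (hypothetical helper around the real StorageV1 API):

    package lognotes

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // waitCSINode performs the read csi_plugin.go keeps retrying. While the
    // kubelet is still system:anonymous this returns the Forbidden error
    // logged above.
    func waitCSINode(ctx context.Context, cs kubernetes.Interface, node string) error {
        csiNode, err := cs.StorageV1().CSINodes().Get(ctx, node, metav1.GetOptions{})
        if err != nil {
            return err
        }
        fmt.Printf("CSINode %s has %d drivers\n", csiNode.Name, len(csiNode.Spec.Drivers))
        return nil
    }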
Mar 12 13:27:12 crc kubenswrapper[4125]: I0312 13:27:12.024772 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:12 crc kubenswrapper[4125]: I0312 13:27:12.028792 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:12 crc kubenswrapper[4125]: I0312 13:27:12.029072 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:12 crc kubenswrapper[4125]: I0312 13:27:12.029108 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:12 crc kubenswrapper[4125]: I0312 13:27:12.031440 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:27:12 crc kubenswrapper[4125]: E0312 13:27:12.032322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:27:12 crc kubenswrapper[4125]: E0312 13:27:12.209302 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:27:12 crc kubenswrapper[4125]: I0312 13:27:12.415683 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:12 crc kubenswrapper[4125]: E0312 13:27:12.896227 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.025724 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.029380 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.030028 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.030069 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.034551 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:27:13 crc kubenswrapper[4125]: E0312 13:27:13.036195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.389387 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.554963 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:13 crc kubenswrapper[4125]: E0312 13:27:13.555638 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.559055 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.559138 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.559159 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:13 crc kubenswrapper[4125]: I0312 13:27:13.559192 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:27:13 crc kubenswrapper[4125]: E0312 13:27:13.571715 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Mar 12 13:27:13 crc kubenswrapper[4125]: E0312 13:27:13.895790 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
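The back-off durations in the CrashLoopBackOff errors follow the kubelet's restart backoff: a 10s base doubled per restart, capped at 5m. "back-off 2m40s" is the fifth delay (10s * 2^4 = 160s), and kube-apiserver-check-endpoints has already hit the 5m0s cap. A sketch of the schedule (the real backoff also resets once a container has run cleanly for long enough):

    package lognotes

    import "time"

    // crashLoopDelay returns the restart delay after the given number of
    // prior restarts: 10s, 20s, 40s, 80s, 2m40s, then capped at 5m.
    func crashLoopDelay(restarts int) time.Duration {
        d := 10 * time.Second
        for i := 0; i < restarts; i++ {
            d *= 2
            if d >= 5*time.Minute {
                return 5 * time.Minute
            }
        }
        return d
    }

crashLoopDelay(4) == 2m40s, matching cluster-policy-controller above; from roughly the sixth restart on, the 5m0s cap applies.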
Mar 12 13:27:14 crc kubenswrapper[4125]: I0312 13:27:14.390001 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:14 crc kubenswrapper[4125]: E0312 13:27:14.895564 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:15 crc kubenswrapper[4125]: I0312 13:27:15.386638 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:15 crc kubenswrapper[4125]: E0312 13:27:15.896062 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:16 crc kubenswrapper[4125]: I0312 13:27:16.378228 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:16 crc kubenswrapper[4125]: E0312 13:27:16.896081 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:17 crc kubenswrapper[4125]: I0312 13:27:17.386963 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:17 crc kubenswrapper[4125]: E0312 13:27:17.895316 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:18 crc kubenswrapper[4125]: I0312 13:27:18.026163 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:18 crc kubenswrapper[4125]: I0312 13:27:18.040915 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:18 crc kubenswrapper[4125]: I0312 13:27:18.041100 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:18 crc kubenswrapper[4125]: I0312 13:27:18.041136 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:18 crc kubenswrapper[4125]: I0312 13:27:18.388571 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:18 crc kubenswrapper[4125]: E0312 13:27:18.895367 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:19 crc kubenswrapper[4125]: I0312 13:27:19.384347 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:19 crc kubenswrapper[4125]: E0312 13:27:19.910280 4125 transport.go:123] "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials." lastCertificateAvailabilityTime="2026-03-12 13:20:30.893046853 +0000 UTC m=+1.216434212" shutdownThreshold="5m0s"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.380760 4125 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.572572 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.574365 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.574430 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.574456 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.574507 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:27:20 crc kubenswrapper[4125]: E0312 13:27:20.580097 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Mar 12 13:27:20 crc kubenswrapper[4125]: E0312 13:27:20.580159 4125 kubelet_node_status.go:100] "Unable to register node with API server" err="nodes is forbidden: User \"system:anonymous\" cannot create resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.788422 4125 csr.go:257] certificate signing request csr-rgmm9 is issued
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.817962 4125 reconstruct_new.go:210] "DevicePaths of reconstructed volumes updated"
Mar 12 13:27:20 crc kubenswrapper[4125]: I0312 13:27:20.895764 4125 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Mar 12 13:27:21 crc kubenswrapper[4125]: I0312 13:27:21.790612 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-05-30 19:56:00.799246359 +0000 UTC
Mar 12 13:27:21 crc kubenswrapper[4125]: I0312 13:27:21.790676 4125 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1902h28m39.008574706s for next certificate rotation
Mar 12 13:27:22 crc kubenswrapper[4125]: E0312 13:27:22.210987 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:27:23 crc kubenswrapper[4125]: I0312 13:27:23.021201 4125 reflector.go:351] Caches populated for *v1.RuntimeClass from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.025254 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.026084 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.027248 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.027306 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.027328 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.028865 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.029363 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.029432 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.029448 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.029899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.030861 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.031258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.581199 4125 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.583562 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.583663 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.583686 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.583891 4125 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.602712 4125 kubelet_node_status.go:116] "Node was previously registered" node="crc"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.603157 4125 kubelet_node_status.go:80] "Successfully registered node" node="crc"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.608527 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.608590 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.608611 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.608638 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.608920 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:27Z","lastTransitionTime":"2026-03-12T13:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.624078 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.630017 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.630142 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.630168 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.630195 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.630222 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:27Z","lastTransitionTime":"2026-03-12T13:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.645448 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{...status patch payload identical to the attempt above, omitted...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.652085 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.652154 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.652179 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.652216 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.652260 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:27Z","lastTransitionTime":"2026-03-12T13:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.665604 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{...status patch payload identical to the attempt above, omitted...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.672396 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.672635 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.672669 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.672715 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.672769 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:27Z","lastTransitionTime":"2026-03-12T13:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.685121 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{...status patch payload identical to the attempt above, omitted...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.691590 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.691654 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.691672 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.691693 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:27 crc kubenswrapper[4125]: I0312 13:27:27.691722 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:27Z","lastTransitionTime":"2026-03-12T13:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.712046 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{...status patch payload identical to the attempt above, omitted...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.712284 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.712497 4125 kubelet_node_status.go:512] "Error getting the current node from lister" err="node \"crc\" not found"
Mar 12 13:27:27 crc kubenswrapper[4125]: E0312 13:27:27.813091 4125 kubelet_node_status.go:506] "Node not becoming ready in time after startup"
Mar 12 13:27:31 crc kubenswrapper[4125]: I0312 13:27:31.416363 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:27:31 crc kubenswrapper[4125]: I0312 13:27:31.416533 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:27:31 crc kubenswrapper[4125]: I0312 13:27:31.416594 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:27:31 crc kubenswrapper[4125]: I0312 13:27:31.416674 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:27:31 crc kubenswrapper[4125]: I0312 13:27:31.416745 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:27:32 crc kubenswrapper[4125]: E0312 13:27:32.162458 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:27:32 crc kubenswrapper[4125]: E0312 13:27:32.211736 4125 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 12 13:27:37 crc kubenswrapper[4125]: E0312 13:27:37.165039 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:27:37 crc kubenswrapper[4125]: I0312 13:27:37.396369 4125 reflector.go:351] Caches populated for *v1.Service from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.037078 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.037222 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.037259 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.037305 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.037344 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:38Z","lastTransitionTime":"2026-03-12T13:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.060457 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.070204 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.070308 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.070344 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.070386 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.070430 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:38Z","lastTransitionTime":"2026-03-12T13:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.093264 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.103035 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.103203 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.103244 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.103285 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.103326 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:38Z","lastTransitionTime":"2026-03-12T13:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.121922 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.130685 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.130745 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.130765 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.130788 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.130904 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:38Z","lastTransitionTime":"2026-03-12T13:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.139710 4125 reflector.go:351] Caches populated for *v1.CSIDriver from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.153175 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.163567 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.164162 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.164205 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.164245 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.164285 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:38Z","lastTransitionTime":"2026-03-12T13:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.196568 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 
12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.196639 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.206538 4125 reflector.go:351] Caches populated for *v1.Node from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.531286 4125 apiserver.go:52] "Watching apiserver"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.565363 4125 reflector.go:351] Caches populated for *v1.Pod from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.569372 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","hostpath-provisioner/csi-hostpathplugin-hvm8g","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/machine-config-daemon-zpnhg","openshift-marketplace/certified-operators-7287f","openshift-network-operator/iptables-alerter-wwpnd","openshift-ingress/router-default-5c9bf7bc58-6jctv","openshift-ingress-canary/ingress-canary-2vhcn","openshift-console/console-84fccc7b6-mkncc","openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-network-node-identity/network-node-identity-7xghp","openshift-console-operator/console-operator-5dbbc74dc9-cp5cd","openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7","openshift-marketplace/marketplace-operator-8b455464d-f9xdt","openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh","openshift-dns-operator/dns-operator-75f687757b-nz2xb","openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm","openshift-machine-config-operator/machine-config-server-v65wr","openshift-network-diagnostics/network-check-target-v54bt","openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b","openshift-authentication/oauth-openshift-765b47f944-n2lhl","openshift-marketplace/redhat-marketplace-8s8pc","openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz","openshift-etcd-operator/etcd-operator-768d5b5d86-722mg","openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv","openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t","openshift-multus/multus-admission-controller-6c7c885997-4hbbc","openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46","openshift-dns/node-resolver-dn27q","openshift-image-registry/node-ca-l92hr","openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv","openshift-marketplace/redhat-marketplace-rmwfn","openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd","openshift-service-ca/service-ca-666f99b6f-vlbxv","openshift-apiserver/apiserver-67cbf64bc9-mtx25","openshift-console/downloads-65476884b9-9wcvx","openshift-etcd/etcd-crc","openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw","openshift-marketplace/redhat-operators-f4jkp","openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz","openshift-console-operator/console-conversion-webhook-595f9969b-l6z49","openshift-image-registry/image-registry-585546dd8b-v5m4t","openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb","openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5","openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8","openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7","openshift-multus/multus-additional-cni-plugins-bzj2p","openshift-multus/multus-q88th","openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc","openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z","openshift-multus/network-metrics-daemon-qdfr4","openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf","openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2","openshift-ovn-kubernetes/ovnkube-node-44qcg","openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m","openshift-controller-manager/controller-manager-6ff78978b4-q4vv8","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb","openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh","openshift-marketplace/community-operators-8jhz6","openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7","openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg","openshift-dns/dns-default-gbw49","openshift-kube-controller-manager/revision-pruner-8-crc","openshift-network-operator/network-operator-767c585db5-zd56b","openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9"]
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.569539 4125 topology_manager.go:215] "Topology Admit Handler" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" podNamespace="openshift-operator-lifecycle-manager" podName="olm-operator-6d8474f75f-x54mh"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.570268 4125 topology_manager.go:215] "Topology Admit Handler" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" podNamespace="openshift-machine-config-operator" podName="machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.570463 4125 topology_manager.go:215] "Topology Admit Handler" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" podNamespace="openshift-kube-apiserver-operator" podName="kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.570749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.570967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.571090 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.571323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.570752 4125 topology_manager.go:215] "Topology Admit Handler" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" podNamespace="openshift-etcd-operator" podName="etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.571922 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.572087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.572282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.572191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.572494 4125 topology_manager.go:215] "Topology Admit Handler" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" podNamespace="openshift-operator-lifecycle-manager" podName="package-server-manager-84d578d794-jw7r2"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.572762 4125 topology_manager.go:215] "Topology Admit Handler" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" podNamespace="openshift-operator-lifecycle-manager" podName="catalog-operator-857456c46-7f5wf"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.573703 4125 topology_manager.go:215] "Topology Admit Handler" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" podNamespace="openshift-marketplace" podName="marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.573973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.573210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.574463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.574038 4125 topology_manager.go:215] "Topology Admit Handler" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" podNamespace="openshift-service-ca-operator" podName="service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.574742 4125 topology_manager.go:215] "Topology Admit Handler" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" podNamespace="openshift-machine-api" podName="machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.575076 4125 topology_manager.go:215] "Topology Admit Handler" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" podNamespace="openshift-network-operator" podName="network-operator-767c585db5-zd56b"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.575244 4125 topology_manager.go:215] "Topology Admit Handler" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" podNamespace="openshift-config-operator" podName="openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.576065 4125 topology_manager.go:215] "Topology Admit Handler" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" podNamespace="openshift-authentication-operator" podName="authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.576250 4125 topology_manager.go:215] "Topology Admit Handler" podUID="71af81a9-7d43-49b2-9287-c375900aa905" podNamespace="openshift-kube-scheduler-operator" podName="openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.576399 4125 topology_manager.go:215] "Topology Admit Handler" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" podNamespace="openshift-controller-manager-operator" podName="openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.576748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.576766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-767c585db5-zd56b"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.574244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.574201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.577248 4125 topology_manager.go:215] "Topology Admit Handler" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" podNamespace="openshift-apiserver-operator" podName="openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.579195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.579358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.579515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.584461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.584636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.584743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.584963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.585262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.585331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.585454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.585947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.586167 4125 topology_manager.go:215] "Topology Admit Handler" podUID="10603adc-d495-423c-9459-4caa405960bb" podNamespace="openshift-dns-operator" podName="dns-operator-75f687757b-nz2xb"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.586341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.586437 4125 topology_manager.go:215] "Topology Admit Handler" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" podNamespace="openshift-kube-storage-version-migrator-operator" podName="kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.586466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.587119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.587222 4125 topology_manager.go:215] "Topology Admit Handler" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" podNamespace="openshift-machine-api" podName="control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.587267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.587424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.587527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.587586 4125 topology_manager.go:215] "Topology Admit Handler" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" podNamespace="openshift-kube-controller-manager-operator" podName="kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.587632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.587785 4125 topology_manager.go:215] "Topology Admit Handler" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" podNamespace="openshift-image-registry" podName="cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.588031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.588160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.588179 4125 topology_manager.go:215] "Topology Admit Handler" podUID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" podNamespace="openshift-multus" podName="multus-additional-cni-plugins-bzj2p"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.588391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.588522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.588637 4125 topology_manager.go:215] "Topology Admit Handler" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" podNamespace="openshift-multus" podName="multus-q88th"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.588770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.589060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.589176 4125 topology_manager.go:215] "Topology Admit Handler" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" podNamespace="openshift-multus" podName="network-metrics-daemon-qdfr4"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.589405 4125 topology_manager.go:215] "Topology Admit Handler" podUID="410cf605-1970-4691-9c95-53fdc123b1f3" podNamespace="openshift-ovn-kubernetes" podName="ovnkube-control-plane-77c846df58-6l97b"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.589547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-q88th"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.589643 4125 topology_manager.go:215] "Topology Admit Handler" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" podNamespace="openshift-network-diagnostics" podName="network-check-source-5c5478f8c-vqvt7"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.589679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.590741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.592292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.592547 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.592924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bzj2p"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.593797 4125 topology_manager.go:215] "Topology Admit Handler" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" podNamespace="openshift-network-diagnostics" podName="network-check-target-v54bt"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.594221 4125 topology_manager.go:215] "Topology Admit Handler" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" podNamespace="openshift-network-node-identity" podName="network-node-identity-7xghp"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.595883 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.596088 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.596610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.596776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.597278 4125 topology_manager.go:215] "Topology Admit Handler" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" podNamespace="openshift-ovn-kubernetes" podName="ovnkube-node-44qcg"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.597606 4125 topology_manager.go:215] "Topology Admit Handler" podUID="2b6d14a5-ca00-40c7-af7a-051a98a24eed" podNamespace="openshift-network-operator" podName="iptables-alerter-wwpnd"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.598234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-wwpnd"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.598262 4125 topology_manager.go:215] "Topology Admit Handler" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" podNamespace="openshift-kube-storage-version-migrator" podName="migrator-f7c6d88df-q2fnv"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.598427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.598587 4125 topology_manager.go:215] "Topology Admit Handler" podUID="378552fd-5e53-4882-87ff-95f3d9198861" podNamespace="openshift-service-ca" podName="service-ca-666f99b6f-vlbxv"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.598926 4125 topology_manager.go:215] "Topology Admit Handler" podUID="6a23c0ee-5648-448c-b772-83dced2891ce" podNamespace="openshift-dns" podName="node-resolver-dn27q"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.599239 4125 topology_manager.go:215] "Topology Admit Handler" podUID="13045510-8717-4a71-ade4-be95a76440a7" podNamespace="openshift-dns" podName="dns-default-gbw49"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.599631 4125 topology_manager.go:215] "Topology Admit Handler" podUID="9fb762d1-812f-43f1-9eac-68034c1ecec7" podNamespace="openshift-cluster-version" podName="cluster-version-operator-6d5d9649f6-x6d46"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.600162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.600290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.600353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-7xghp"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.600731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.600937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-dn27q"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.601274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.601574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.601740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.605675 4125 topology_manager.go:215] "Topology Admit Handler" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" podNamespace="openshift-oauth-apiserver" podName="apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.606157 4125 topology_manager.go:215] "Topology Admit Handler" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" podNamespace="openshift-operator-lifecycle-manager" podName="packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.606445 4125 topology_manager.go:215] "Topology Admit Handler" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" podNamespace="openshift-ingress-operator" podName="ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.606694 4125 topology_manager.go:215] "Topology Admit Handler" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" podNamespace="openshift-cluster-samples-operator" podName="cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.607213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.607257 4125 topology_manager.go:215] "Topology Admit Handler" podUID="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" podNamespace="openshift-cluster-machine-approver" podName="machine-approver-7874c8775-kh4j9"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.607357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.607453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.607575 4125 topology_manager.go:215] "Topology Admit Handler" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" podNamespace="openshift-ingress" podName="router-default-5c9bf7bc58-6jctv"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.607953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.607973 4125 topology_manager.go:215] "Topology Admit Handler" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" podNamespace="openshift-machine-config-operator" podName="machine-config-daemon-zpnhg"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.608145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.608952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.609241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.609559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.609598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.609649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.609704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.608347 4125 topology_manager.go:215] "Topology Admit Handler" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" podNamespace="openshift-console-operator" podName="console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.611728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.612070 4125 topology_manager.go:215] "Topology Admit Handler" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" podNamespace="openshift-console-operator" podName="console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.612405 4125 topology_manager.go:215] "Topology Admit Handler" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" podNamespace="openshift-machine-config-operator" podName="machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.612732 4125 topology_manager.go:215] "Topology Admit Handler" podUID="6268b7fe-8910-4505-b404-6f1df638105c" podNamespace="openshift-console" podName="downloads-65476884b9-9wcvx"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.613247 4125 topology_manager.go:215] "Topology Admit Handler" podUID="bf1a8b70-3856-486f-9912-a2de1d57c3fb" podNamespace="openshift-machine-config-operator" podName="machine-config-server-v65wr"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.613542 4125 topology_manager.go:215] "Topology Admit Handler" podUID="f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e" podNamespace="openshift-image-registry" podName="node-ca-l92hr"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.613936 4125 topology_manager.go:215] "Topology Admit Handler" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" podNamespace="openshift-ingress-canary" podName="ingress-canary-2vhcn"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.614332 4125 topology_manager.go:215] "Topology Admit Handler" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" podNamespace="openshift-multus" podName="multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.614627 4125 topology_manager.go:215] "Topology Admit Handler" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" podNamespace="hostpath-provisioner" podName="csi-hostpathplugin-hvm8g"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.615178 4125 topology_manager.go:215] "Topology Admit Handler" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" podNamespace="openshift-image-registry" podName="image-registry-585546dd8b-v5m4t"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.615546 4125 topology_manager.go:215] "Topology Admit Handler" podUID="13ad7555-5f28-4555-a563-892713a8433a" podNamespace="openshift-authentication" podName="oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.616041 4125 topology_manager.go:215] "Topology Admit Handler" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" podNamespace="openshift-controller-manager" podName="controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.616388 4125 topology_manager.go:215] "Topology Admit Handler" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" podNamespace="openshift-console" podName="console-84fccc7b6-mkncc"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.616800 4125 topology_manager.go:215] "Topology Admit Handler" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-l92hr"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.618638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.619103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.619275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618232 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.619369 4125 topology_manager.go:215] "Topology Admit Handler" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" podNamespace="openshift-apiserver" podName="apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-v65wr"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.618409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.619465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.619561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.619572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.619562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.619607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.619787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.619941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.620174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.620295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.622185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.622262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.622667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.622737 4125 topology_manager.go:215] "Topology Admit Handler" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" podNamespace="openshift-marketplace" podName="certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.623921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.624077 4125 topology_manager.go:215] "Topology Admit Handler" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" podNamespace="openshift-marketplace" podName="community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.624624 4125 topology_manager.go:215] "Topology Admit Handler" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" podNamespace="openshift-marketplace" podName="redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.625465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.627420 4125 topology_manager.go:215] "Topology Admit Handler" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" podNamespace="openshift-marketplace" podName="redhat-operators-f4jkp" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.631946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.634436 4125 topology_manager.go:215] "Topology Admit Handler" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" podNamespace="openshift-marketplace" podName="redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.634789 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.635342 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.623235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.640568 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.640671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.627976 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.640902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.629588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.641190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.635618 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.641250 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.637175 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.639785 4125 topology_manager.go:215] "Topology Admit Handler" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.641642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.627913 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.641939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.642146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.642344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.643351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.643638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.640165 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.645699 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.646095 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.649133 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.650194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.650352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.650476 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.650599 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.651702 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.652200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.655103 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.655370 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.655629 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.655170 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.668175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.669468 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod 
\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.670594 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.671747 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.672202 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.669321 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.672603 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673037 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673292 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673327 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673516 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673580 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673702 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673738 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673954 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.674029 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.674193 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.674329 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.674408 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.674485 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Mar 12 13:27:38 
crc kubenswrapper[4125]: E0312 13:27:38.674771 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.675244 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.675533 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.675691 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-host\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.675799 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676113 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.675538 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.175206693 +0000 UTC m=+429.498592722 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676369 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676427 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676467 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f9495\" (UniqueName: \"kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676497 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" 
(UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-plugins-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676530 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-system-cni-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676561 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676595 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/cc291782-27d2-4a74-af79-c7dcb31535d2-host-etc-kube\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676627 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd556935-a077-45df-ba3f-d42c39326ccd-tmpfs\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676659 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6z2n9\" (UniqueName: \"kubernetes.io/projected/bf1a8b70-3856-486f-9912-a2de1d57c3fb-kube-api-access-6z2n9\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676689 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.676726 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.678680 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.678798 4125 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.178768506 +0000 UTC m=+429.502154585 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.673516 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.680736 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.680973 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.681402 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.681438 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.681781 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.682100 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.682250 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.682319 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.182302645 +0000 UTC m=+429.505688704 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.681618 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.681655 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.682590 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.682681 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.182663023 +0000 UTC m=+429.506048892 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.682730 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.682769 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.182761656 +0000 UTC m=+429.506147415 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.683902 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.683942 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.183933135 +0000 UTC m=+429.507319014 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.689758 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd556935-a077-45df-ba3f-d42c39326ccd-tmpfs\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.694386 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.694592 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.694633 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gsxd9\" (UniqueName: \"kubernetes.io/projected/6a23c0ee-5648-448c-b772-83dced2891ce-kube-api-access-gsxd9\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.694651 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.694661 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.694696 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.194682362 +0000 UTC m=+429.518068271 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.694538 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.694760 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.194743933 +0000 UTC m=+429.518129832 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.694930 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.694966 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.19495764 +0000 UTC m=+429.518343679 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695013 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-socket-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695127 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4qr9t\" (UniqueName: \"kubernetes.io/projected/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-kube-api-access-4qr9t\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695153 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-certs\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695176 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-etc-kubernetes\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695205 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-c2f8t\" (UniqueName: \"kubernetes.io/projected/475321a1-8b7e-4033-8f72-b05a8b377347-kube-api-access-c2f8t\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695225 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6a23c0ee-5648-448c-b772-83dced2891ce-hosts-file\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695263 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-env-overrides\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695288 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-v45vm\" (UniqueName: \"kubernetes.io/projected/aa90b3c2-febd-4588-a063-7fbbe82f00c1-kube-api-access-v45vm\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc 
kubenswrapper[4125]: I0312 13:27:38.695310 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9d0dcce3-d96e-48cb-9b9f-362105911589-mcd-auth-proxy-config\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695334 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695360 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695386 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695410 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695447 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cc291782-27d2-4a74-af79-c7dcb31535d2-metrics-tls\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695468 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa90b3c2-febd-4588-a063-7fbbe82f00c1-service-ca-bundle\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695492 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695516 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: 
\"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695563 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695584 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695606 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zjg2w\" (UniqueName: \"kubernetes.io/projected/51a02bbf-2d40-4f84-868a-d399ea18a846-kube-api-access-zjg2w\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695631 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695663 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695689 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-env-overrides\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695713 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-ovnkube-identity-cm\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " 
pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695734 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695759 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-auth-proxy-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695783 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695805 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695883 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695906 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.695939 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-csi-data-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696011 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696043 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-ssl-certs\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696066 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb762d1-812f-43f1-9eac-68034c1ecec7-serving-cert\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696089 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696112 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696134 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696155 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696198 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696219 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod 
\"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696241 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696261 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4sfhc\" (UniqueName: \"kubernetes.io/projected/cc291782-27d2-4a74-af79-c7dcb31535d2-kube-api-access-4sfhc\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696286 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696310 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696332 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-default-certificate\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696359 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696381 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696403 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: 
\"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696424 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696447 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rkkfv\" (UniqueName: \"kubernetes.io/projected/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-kube-api-access-rkkfv\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696472 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696495 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696517 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696542 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8svnk\" (UniqueName: \"kubernetes.io/projected/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-kube-api-access-8svnk\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696564 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696589 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-auth-proxy-config\") pod 
\"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696612 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/410cf605-1970-4691-9c95-53fdc123b1f3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696635 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696665 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vtgqn\" (UniqueName: \"kubernetes.io/projected/297ab9b6-2186-4d5b-a952-2bfd59af63c4-kube-api-access-vtgqn\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696707 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696730 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-bin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696752 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696774 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-serviceca\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696796 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 
crc kubenswrapper[4125]: I0312 13:27:38.696865 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696894 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696918 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696940 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-cnibin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.696961 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-socket-dir-parent\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697010 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/530553aa-0a1d-423e-8a22-f5eb4bdbb883-available-featuregates\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697039 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697063 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697085 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-conf-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697107 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697131 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697152 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697199 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697219 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697241 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697262 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-registration-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" 
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697286 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697307 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-multus\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697330 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697351 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-os-release\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697372 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-stats-auth\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697395 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697419 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xkzjk\" (UniqueName: \"kubernetes.io/projected/9d0dcce3-d96e-48cb-9b9f-362105911589-kube-api-access-xkzjk\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697442 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697464 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod 
\"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697486 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9d0dcce3-d96e-48cb-9b9f-362105911589-proxy-tls\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697508 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/2b6d14a5-ca00-40c7-af7a-051a98a24eed-iptables-alerter-script\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697530 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697555 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697577 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697600 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/297ab9b6-2186-4d5b-a952-2bfd59af63c4-mcc-auth-proxy-config\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697621 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697645 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" 
Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697670 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2b6d14a5-ca00-40c7-af7a-051a98a24eed-host-slash\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697698 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697721 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-bound-sa-token\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697746 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697767 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j4qn7\" (UniqueName: \"kubernetes.io/projected/2b6d14a5-ca00-40c7-af7a-051a98a24eed-kube-api-access-j4qn7\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.697791 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.701709 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.701765 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.201747293 +0000 UTC m=+429.525133262 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703304 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/13045510-8717-4a71-ade4-be95a76440a7-kube-api-access-dtjml\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703342 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-system-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703377 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703403 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703450 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703474 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703502 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-cni-binary-copy\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703527 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-multus-certs\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703551 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703573 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cnibin\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703601 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-dir\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703627 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703648 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-kubelet\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703683 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-bound-sa-token\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703710 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703734 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703754 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 
13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703793 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-k8s-cni-cncf-io\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703875 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bwbqm\" (UniqueName: \"kubernetes.io/projected/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-kube-api-access-bwbqm\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703912 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703948 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-netns\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.703977 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704037 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-node-bootstrap-token\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704067 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704095 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704118 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: 
\"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704146 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb762d1-812f-43f1-9eac-68034c1ecec7-kube-api-access\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704178 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-binary-copy\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704205 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704232 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704255 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-mountpoint-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704278 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704304 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-os-release\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704327 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704350 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9d0dcce3-d96e-48cb-9b9f-362105911589-rootfs\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704375 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51a02bbf-2d40-4f84-868a-d399ea18a846-webhook-cert\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704399 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-metrics-certs\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704422 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704446 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704481 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704503 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704524 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-hostroot\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704550 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" 
(UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-multus-daemon-config\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704575 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704601 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704627 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9x6dp\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-kube-api-access-9x6dp\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704652 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704677 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7jw8\" (UniqueName: \"kubernetes.io/projected/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-kube-api-access-d7jw8\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704703 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704726 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-machine-approver-tls\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704751 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-config\") pod 
\"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704774 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.704802 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705013 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705044 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705070 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705096 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705121 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bwvjb\" (UniqueName: \"kubernetes.io/projected/120b38dc-8236-4fa6-a452-642b8ad738ee-kube-api-access-bwvjb\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705146 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-ovnkube-config\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705172 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705195 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705219 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.705247 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.709167 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.709204 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.709276 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-cx4f9\" (UniqueName: \"kubernetes.io/projected/410cf605-1970-4691-9c95-53fdc123b1f3-kube-api-access-cx4f9\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.709309 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb762d1-812f-43f1-9eac-68034c1ecec7-service-ca\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.709336 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.709556 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.709634 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.209613867 +0000 UTC m=+429.532999766 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.709867 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.710034 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.710081 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.210067866 +0000 UTC m=+429.533453755 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.710121 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.710144 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.210137108 +0000 UTC m=+429.533523047 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.710278 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.710305 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.210297823 +0000 UTC m=+429.533683762 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.711038 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.711130 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.21110966 +0000 UTC m=+429.534495449 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.712727 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.715903 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-stats-auth\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.717020 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.216970394 +0000 UTC m=+429.540356293 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.717497 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.717612 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.717650 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.217641467 +0000 UTC m=+429.541027246 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.718340 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9d0dcce3-d96e-48cb-9b9f-362105911589-mcd-auth-proxy-config\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.718405 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.718438 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.218427852 +0000 UTC m=+429.541813631 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.718900 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9495\" (UniqueName: \"kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.718976 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.719045 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.219033973 +0000 UTC m=+429.542419832 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.719104 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.719136 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.219129006 +0000 UTC m=+429.542514885 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.719335 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-env-overrides\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.721108 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.721259 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.221244022 +0000 UTC m=+429.544629972 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.722153 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-env-overrides\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.722413 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.722483 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.222469243 +0000 UTC m=+429.545855122 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.722918 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.722977 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.222964979 +0000 UTC m=+429.546350858 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.724661 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.724903 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.224890752 +0000 UTC m=+429.548276621 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.727351 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.728934 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-auth-proxy-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.729024 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747343 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.24731219 +0000 UTC m=+429.570698079 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.730187 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-auth-proxy-config\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.733336 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-ovnkube-identity-cm\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.737692 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747432 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247423884 +0000 UTC m=+429.570809763 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.737756 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747497 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247490287 +0000 UTC m=+429.570876176 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.737791 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747534 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247528468 +0000 UTC m=+429.570914357 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.738042 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747574 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247565129 +0000 UTC m=+429.570951028 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.742075 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747612 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.24760606 +0000 UTC m=+429.570991829 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.742113 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747646 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247640771 +0000 UTC m=+429.571026650 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.742544 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa90b3c2-febd-4588-a063-7fbbe82f00c1-service-ca-bundle\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.743049 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747700 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247694193 +0000 UTC m=+429.571080042 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.743796 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/297ab9b6-2186-4d5b-a952-2bfd59af63c4-mcc-auth-proxy-config\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.743912 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747755 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.247749475 +0000 UTC m=+429.571135244 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.744142 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747795 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247789686 +0000 UTC m=+429.571175455 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.744282 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747885 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.247876939 +0000 UTC m=+429.571262718 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.744547 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.747920 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.24791418 +0000 UTC m=+429.571299949 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.750920 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-serviceca\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.751501 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.751552 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.251535799 +0000 UTC m=+429.574921588 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.752919 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z2n9\" (UniqueName: \"kubernetes.io/projected/bf1a8b70-3856-486f-9912-a2de1d57c3fb-kube-api-access-6z2n9\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.754109 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-cni-binary-copy\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754191 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754225 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.254215417 +0000 UTC m=+429.577601296 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754290 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754354 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.25430876 +0000 UTC m=+429.577694659 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754546 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754606 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.25459667 +0000 UTC m=+429.577982559 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754649 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.754672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.254665762 +0000 UTC m=+429.578051651 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.755533 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.762904 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/2b6d14a5-ca00-40c7-af7a-051a98a24eed-iptables-alerter-script\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.762978 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.763216 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.263192871 +0000 UTC m=+429.586578920 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.763401 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-default-certificate\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.763937 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2f8t\" (UniqueName: \"kubernetes.io/projected/475321a1-8b7e-4033-8f72-b05a8b377347-kube-api-access-c2f8t\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.763943 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-certs\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.767187 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 
13:27:38.767256 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.267244404 +0000 UTC m=+429.590630183 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.767305 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.767331 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.267323687 +0000 UTC m=+429.590709456 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.768078 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.768250 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.268228781 +0000 UTC m=+429.591614570 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.769227 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-binary-copy\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.769322 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.769371 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.269356306 +0000 UTC m=+429.592742175 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.769456 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.769483 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.2694763 +0000 UTC m=+429.592862079 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.769536 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.769561 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.269553222 +0000 UTC m=+429.592939001 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.768665 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.770475 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.270463404 +0000 UTC m=+429.593849183 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.770959 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.771918 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.271902761 +0000 UTC m=+429.595288600 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.772113 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.772251 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.272237533 +0000 UTC m=+429.595623432 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.772467 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.772769 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.272751278 +0000 UTC m=+429.596137188 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.774179 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-multus-daemon-config\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.775314 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51a02bbf-2d40-4f84-868a-d399ea18a846-webhook-cert\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.775631 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb762d1-812f-43f1-9eac-68034c1ecec7-service-ca\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.775700 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.775736 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.275726825 +0000 UTC m=+429.599112594 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.775971 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.776042 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.276028284 +0000 UTC m=+429.599414123 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.778032 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.780025 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.280009625 +0000 UTC m=+429.603395414 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.780143 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.780175 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.28016692 +0000 UTC m=+429.603552689 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.784478 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.784907 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.284805155 +0000 UTC m=+429.608191024 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.786414 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.786577 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.786685 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.286674608 +0000 UTC m=+429.610060507 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.787455 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.787614 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.287603588 +0000 UTC m=+429.610989367 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.788655 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/410cf605-1970-4691-9c95-53fdc123b1f3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.789069 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cc291782-27d2-4a74-af79-c7dcb31535d2-metrics-tls\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.789299 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.789626 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-node-bootstrap-token\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.790048 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.790304 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb762d1-812f-43f1-9eac-68034c1ecec7-serving-cert\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.790401 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.790473 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.290460319 +0000 UTC m=+429.613846098 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.790527 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.790556 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.290547682 +0000 UTC m=+429.613933581 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.790327 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9d0dcce3-d96e-48cb-9b9f-362105911589-proxy-tls\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.779582 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.790944 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791165 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.779389 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/530553aa-0a1d-423e-8a22-f5eb4bdbb883-available-featuregates\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791468 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791728 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object 
"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791742 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791384 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791777 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.791785 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.792014 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-ovnkube-config\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.792444 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsxd9\" (UniqueName: \"kubernetes.io/projected/6a23c0ee-5648-448c-b772-83dced2891ce-kube-api-access-gsxd9\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.795525 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.295509347 +0000 UTC m=+429.618895246 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.795630 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.295619351 +0000 UTC m=+429.619005240 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.795727 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.295713455 +0000 UTC m=+429.619099234 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.806136 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.806442 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.807170 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.807305 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.307292622 +0000 UTC m=+429.630678421 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.807142 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8svnk\" (UniqueName: \"kubernetes.io/projected/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-kube-api-access-8svnk\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.807466 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-v45vm\" (UniqueName: \"kubernetes.io/projected/aa90b3c2-febd-4588-a063-7fbbe82f00c1-kube-api-access-v45vm\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.808049 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-metrics-certs\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.808331 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-machine-approver-tls\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.808342 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qr9t\" (UniqueName: \"kubernetes.io/projected/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-kube-api-access-4qr9t\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814077 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814142 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814168 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod 
\"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814225 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814250 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814279 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814341 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-csi-data-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814368 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814399 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-ssl-certs\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814438 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit-dir\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814470 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-catalog-content\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814492 4125 reconciler_common.go:231] "operationExecutor.MountVolume 
started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814529 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814621 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814730 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-bin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814790 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-cnibin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814859 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-socket-dir-parent\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814895 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.814961 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-conf-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815008 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815045 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815088 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815119 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-registration-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815151 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-multus\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815187 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-os-release\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815208 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-node-pullsecrets\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815230 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815254 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815285 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815320 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815420 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815449 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815471 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815517 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2b6d14a5-ca00-40c7-af7a-051a98a24eed-host-slash\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815578 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-system-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815607 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815654 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-multus-certs\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815685 4125 reconciler_common.go:231] "operationExecutor.MountVolume started 
for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cnibin\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815751 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-dir\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815784 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-kubelet\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815866 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815898 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815921 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.815953 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-utilities\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816008 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-k8s-cni-cncf-io\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816048 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-netns\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816121 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816145 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-os-release\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816178 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816199 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-mountpoint-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816221 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9d0dcce3-d96e-48cb-9b9f-362105911589-rootfs\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816253 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816284 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816336 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-hostroot\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816408 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816451 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816474 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816498 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816519 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816566 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816606 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816655 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816713 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816741 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-plugins-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816762 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-host\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.816795 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.817650 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.817671 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.817685 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.817733 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.317717583 +0000 UTC m=+429.641103472 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.817860 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.817906 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.817972 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818031 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.318021503 +0000 UTC m=+429.641407382 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818078 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818102 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.318095706 +0000 UTC m=+429.641481595 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818142 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818166 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.318159969 +0000 UTC m=+429.641545748 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818197 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818220 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.31821294 +0000 UTC m=+429.641598829 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818248 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818271 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.318264952 +0000 UTC m=+429.641650721 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818379 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-csi-data-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818435 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-ssl-certs\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818495 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818518 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.31851184 +0000 UTC m=+429.641897609 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818545 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818575 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.818598 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.318591722 +0000 UTC m=+429.641977491 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818625 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-bin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818677 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-cnibin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818874 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-socket-dir-parent\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818917 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.818949 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-conf-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819025 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819063 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.319052527 +0000 UTC m=+429.642438286 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819094 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819124 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819166 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-registration-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819196 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-multus\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819242 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-os-release\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819288 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819313 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.319305215 +0000 UTC m=+429.642690994 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819349 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819383 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819407 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.319399398 +0000 UTC m=+429.642785177 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819439 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819460 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.31945403 +0000 UTC m=+429.642839919 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819522 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819547 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.319540514 +0000 UTC m=+429.642926413 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819575 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819606 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819635 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2b6d14a5-ca00-40c7-af7a-051a98a24eed-host-slash\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819668 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.819691 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.319684619 +0000 UTC m=+429.643070398 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819727 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-system-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819761 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.819799 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-multus-certs\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.821244 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/13045510-8717-4a71-ade4-be95a76440a7-kube-api-access-dtjml\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.821444 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.821551 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cnibin\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.821676 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-dir\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.821779 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-kubelet\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.822186 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities\") pod 
\"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.822741 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-netns\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.823574 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-k8s-cni-cncf-io\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.828756 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtgqn\" (UniqueName: \"kubernetes.io/projected/297ab9b6-2186-4d5b-a952-2bfd59af63c4-kube-api-access-vtgqn\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.828912 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830111 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830182 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830283 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.330267515 +0000 UTC m=+429.653653414 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.828945 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830442 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.830515 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830522 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830630 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.330614637 +0000 UTC m=+429.654000406 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.830729 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.830791 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-hostroot\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830967 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.831078 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.831173 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.331162964 +0000 UTC m=+429.654548763 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.829699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.830083 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.830365 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-mountpoint-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.830393 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9d0dcce3-d96e-48cb-9b9f-362105911589-rootfs\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.829612 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-bound-sa-token\") pod 
\"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.831350 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.831723 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.832750 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.832787 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.831906 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.832902 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.832912 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.832137 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.832164 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.832188 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.832372 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-os-release\") 
pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.832726 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833496 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.333483521 +0000 UTC m=+429.656869270 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833513 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.333506682 +0000 UTC m=+429.656892441 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833597 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833609 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833618 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833666 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.333639257 +0000 UTC m=+429.657025136 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833701 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833715 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833724 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.833748 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.33374159 +0000 UTC m=+429.657127359 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.837894 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-bound-sa-token\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.838342 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.838581 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.838897 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.839037 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw 
podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.339022983 +0000 UTC m=+429.662408782 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.839194 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.839270 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.839360 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.339349694 +0000 UTC m=+429.662735532 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.842641 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.842906 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.843228 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.34321571 +0000 UTC m=+429.666601639 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.843010 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-plugins-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.843051 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-host\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.844122 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-system-cni-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.843779 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjg2w\" (UniqueName: \"kubernetes.io/projected/51a02bbf-2d40-4f84-868a-d399ea18a846-kube-api-access-zjg2w\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.843885 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.844649 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-system-cni-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.846447 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/cc291782-27d2-4a74-af79-c7dcb31535d2-host-etc-kube\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.846755 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.846971 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.847227 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-socket-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.847402 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-etc-kubernetes\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.847561 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6a23c0ee-5648-448c-b772-83dced2891ce-hosts-file\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.847655 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.847904 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.848151 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/cc291782-27d2-4a74-af79-c7dcb31535d2-host-etc-kube\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.848244 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.848303 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.348285497 +0000 UTC m=+429.671671466 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.848521 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.848792 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-socket-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.849344 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.851651 4125 desired_state_of_world_populator.go:159] "Finished populating initial desired state of world" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.854551 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.854649 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.854789 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.354762842 +0000 UTC m=+429.678148831 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.849883 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-etc-kubernetes\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.850068 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6a23c0ee-5648-448c-b772-83dced2891ce-hosts-file\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.850127 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.855300 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.355287259 +0000 UTC m=+429.678673148 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.850182 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.855472 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.855568 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.855682 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.355669762 +0000 UTC m=+429.679055791 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.862489 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkkfv\" (UniqueName: \"kubernetes.io/projected/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-kube-api-access-rkkfv\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.865708 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.865758 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.865774 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.866057 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.866075 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.866084 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.866491 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.365851787 +0000 UTC m=+429.689237786 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.866572 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.366559509 +0000 UTC m=+429.689945388 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.871428 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.871550 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.871628 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.871781 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.37176006 +0000 UTC m=+429.695145959 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.874439 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sfhc\" (UniqueName: \"kubernetes.io/projected/cc291782-27d2-4a74-af79-c7dcb31535d2-kube-api-access-4sfhc\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.874970 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwbqm\" (UniqueName: \"kubernetes.io/projected/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-kube-api-access-bwbqm\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.878604 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.879226 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.880011 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.880865 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4qn7\" (UniqueName: \"kubernetes.io/projected/2b6d14a5-ca00-40c7-af7a-051a98a24eed-kube-api-access-j4qn7\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.881387 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwvjb\" (UniqueName: \"kubernetes.io/projected/120b38dc-8236-4fa6-a452-642b8ad738ee-kube-api-access-bwvjb\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.881775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.881967 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.381947066 +0000 UTC m=+429.705332975 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.884143 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.884186 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.884202 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.884260 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.3842379 +0000 UTC m=+429.707623669 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.899097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910335 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910363 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910431 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.410412279 +0000 UTC m=+429.733798338 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910469 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910481 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910501 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.910531 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.410524173 +0000 UTC m=+429.733910052 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.912755 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.912771 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.912780 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.912909 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.412805408 +0000 UTC m=+429.736191187 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913150 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913168 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913176 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913207 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.41319817 +0000 UTC m=+429.736583999 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913551 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913566 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913575 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.913608 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.413599084 +0000 UTC m=+429.736984963 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.913697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.926031 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.926089 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.926105 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.926171 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.426151296 +0000 UTC m=+429.749537175 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928274 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928315 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928327 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928365 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.428355188 +0000 UTC m=+429.751740967 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928439 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928453 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928461 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928493 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.428480912 +0000 UTC m=+429.751866691 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928570 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928582 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928590 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.928628 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.428618497 +0000 UTC m=+429.752004336 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.929376 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.929395 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.929404 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.929437 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.429428244 +0000 UTC m=+429.752814013 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936364 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936395 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936409 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936472 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.436452832 +0000 UTC m=+429.759838721 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936516 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936527 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936536 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.936568 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.436560157 +0000 UTC m=+429.759945936 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.938860 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.938934 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.938953 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.939062 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.439038829 +0000 UTC m=+429.762424718 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.940012 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkzjk\" (UniqueName: \"kubernetes.io/projected/9d0dcce3-d96e-48cb-9b9f-362105911589-kube-api-access-xkzjk\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.941128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.942020 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x6dp\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-kube-api-access-9x6dp\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.947632 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.947729 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7jw8\" (UniqueName: \"kubernetes.io/projected/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-kube-api-access-d7jw8\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.948556 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.948631 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.948692 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.948910 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949008 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit-dir\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949057 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-catalog-content\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949188 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949421 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-node-pullsecrets\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949482 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949504 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949577 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949600 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949676 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949700 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949723 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949866 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949893 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949929 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-utilities\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.949952 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950024 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950082 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950142 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950185 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950217 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-ca-trust-extracted\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950237 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950308 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950376 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-certificates\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950409 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950452 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:38 crc 
kubenswrapper[4125]: I0312 13:27:38.950595 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950635 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950656 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950689 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950719 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.950780 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.951156 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.951216 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.951261 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.952250 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit-dir\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.952946 4125 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-catalog-content\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.953120 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.953782 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.953906 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962423 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.462405618 +0000 UTC m=+429.785791397 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962461 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.46245387 +0000 UTC m=+429.785839629 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962477 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.46247023 +0000 UTC m=+429.785856089 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962519 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.462506671 +0000 UTC m=+429.785892430 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962548 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.462538592 +0000 UTC m=+429.785924341 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962564 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.462557673 +0000 UTC m=+429.785943422 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962580 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.462573503 +0000 UTC m=+429.785959252 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.962586 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-node-pullsecrets\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962868 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.962958 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.462948135 +0000 UTC m=+429.786333924 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.963047 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.963084 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.46307569 +0000 UTC m=+429.786461539 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.963301 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.963348 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.463332479 +0000 UTC m=+429.786718248 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.963500 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.963546 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.963896 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-utilities\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.963953 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.964124 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.964170 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.464156465 +0000 UTC m=+429.787542402 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.964205 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.964247 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.464232868 +0000 UTC m=+429.787618837 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.964375 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.965397 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.966359 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13ad7555-5f28-4555-a563-892713a8433a-audit-dir\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.966523 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.466513889 +0000 UTC m=+429.789899678 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.966600 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.966668 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.966679 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.966745 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:39.466737577 +0000 UTC m=+429.790123346 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.967252 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.967339 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.967396 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.467384038 +0000 UTC m=+429.790769917 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.967502 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-bound-sa-token\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.967596 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-khtlk\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-kube-api-access-khtlk\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.967630 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.971556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.971674 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.971688 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.971728 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.47171691 +0000 UTC m=+429.795102679 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.971955 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.972088 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.472076832 +0000 UTC m=+429.795462721 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.976799 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.976897 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.976924 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.977037 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.476978423 +0000 UTC m=+429.800364312 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.977194 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx4f9\" (UniqueName: \"kubernetes.io/projected/410cf605-1970-4691-9c95-53fdc123b1f3-kube-api-access-cx4f9\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.977413 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.977469 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.477453598 +0000 UTC m=+429.800839687 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.977701 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978111 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978161 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.478143342 +0000 UTC m=+429.801529111 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978218 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978270 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.478248805 +0000 UTC m=+429.801634584 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978331 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978379 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.478366219 +0000 UTC m=+429.801751998 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978463 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.978510 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.478497789 +0000 UTC m=+429.801883792 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.979422 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: E0312 13:27:38.979891 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.479800072 +0000 UTC m=+429.803185971 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.983166 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.988151 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-ca-trust-extracted\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.988262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-q88th" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.989470 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-certificates\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:38 crc kubenswrapper[4125]: I0312 13:27:38.991969 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb762d1-812f-43f1-9eac-68034c1ecec7-kube-api-access\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.008231 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.008307 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.008325 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.008612 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.508381506 +0000 UTC m=+429.831767395 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.012367 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.038603 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-bound-sa-token\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.045666 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-khtlk\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-kube-api-access-khtlk\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.065461 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.065615 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.065705 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.066493 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.565977641 +0000 UTC m=+429.889363700 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.071603 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13ad7555-5f28-4555-a563-892713a8433a-audit-dir\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.075307 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13ad7555-5f28-4555-a563-892713a8433a-audit-dir\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.079734 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.096471 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.111433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.111535 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.111902 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.111918 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.112026 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.611972373 +0000 UTC m=+429.935358242 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.115881 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.115932 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.115951 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.116072 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:39.616049948 +0000 UTC m=+429.939435737 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.125681 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.134178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-dn27q" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.162147 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.167882 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.176036 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.177456 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.177591 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.177517245 +0000 UTC m=+430.500903184 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.202087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-version-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-version-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.214625 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"d6aaf0a79a82a6ecb9af0cff5ef19cafd18e0ce4564b8aeed6d2e845f6acf255"} Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.218905 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" event={"ID":"bf1a8b70-3856-486f-9912-a2de1d57c3fb","Type":"ContainerStarted","Data":"9a514882fd03c030b0cc0ddf6ecff2bf63c0147ce86239bc29fc47ba2987d6a3"} Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.223566 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerStarted","Data":"1738658d3b6cf61b7c853d59526fbff52bff9b4f599215ffdf828211c71d4163"} Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.225443 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"9b8354c373751fbeb0189df7398727f0987256cbdb648526e718b641633c24cf"} Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.226842 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"faa99e04060377b01cebd4a188d2c3faeada051a6162480051fceacd1e7dd74c"} Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.227054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.230328 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"4e80e6ca5f70fe734d8ff9fcb53441185c863de1a5757b9003da8987ef3f3ccb"} Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.242557 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-l92hr" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.249614 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.256240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281208 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281347 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281397 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281453 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281505 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281534 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281611 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281640 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.281797 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282276 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282335 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282373 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282413 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282496 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282551 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282577 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282611 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282660 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282689 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282714 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282749 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282791 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282895 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.282968 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.283066 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.283104 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.283134 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.283283 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.283479 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284453 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284573 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284665 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod 
\"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284694 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284718 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284749 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284779 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284910 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284941 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.284967 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285055 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285128 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285182 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285275 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285335 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285461 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285660 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285726 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285797 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285893 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.285964 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.286019 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.286190 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.286271 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.286297 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.286355 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.286407 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.286735 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered 
Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.286783 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.286867 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.286788724 +0000 UTC m=+430.610174623 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.286911 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.286889507 +0000 UTC m=+430.610275616 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.286955 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.286971 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287019 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.287006011 +0000 UTC m=+430.610392270 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287059 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.287045492 +0000 UTC m=+430.610431361 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287073 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287106 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.287096423 +0000 UTC m=+430.610482202 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287129 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287165 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.287156665 +0000 UTC m=+430.610542444 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287175 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287216 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287265 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287510 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287515 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287551 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287298 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287592 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287006 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287313 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287331 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287361 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287378 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287401 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object 
"openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287691 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287220 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.287207507 +0000 UTC m=+430.610593446 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287730 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287778 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287802 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287885 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287409 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287933 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288012 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288024 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287731 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.287722404 +0000 UTC m=+430.611108163 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288072 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.288054964 +0000 UTC m=+430.611440833 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288091 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.288084205 +0000 UTC m=+430.611469954 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288106 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.288099316 +0000 UTC m=+430.611485065 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287936 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288545 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288610 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288668 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288723 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288778 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288881 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287275 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288934 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288120 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.288114126 +0000 UTC m=+430.611499875 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287472 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287484 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287342 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288161 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288189 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288223 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288251 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288281 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288308 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288331 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288359 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288398 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288435 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288459 4125 
configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288491 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.288516 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287439 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.287448 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290650 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.288972195 +0000 UTC m=+430.614020375 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290746 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290670037 +0000 UTC m=+430.614056086 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290771 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.29075912 +0000 UTC m=+430.614144989 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290789 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:40.290782251 +0000 UTC m=+430.614168010 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290804 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290798371 +0000 UTC m=+430.614184120 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290896 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290886724 +0000 UTC m=+430.614272483 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290916 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290905035 +0000 UTC m=+430.614290904 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290936 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290928876 +0000 UTC m=+430.614314635 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290950 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290943536 +0000 UTC m=+430.614329295 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.290968 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290962377 +0000 UTC m=+430.614348236 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291058 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.290976137 +0000 UTC m=+430.614362016 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291077 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.29107171 +0000 UTC m=+430.614457459 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291093 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291086281 +0000 UTC m=+430.614472080 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291112 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291099961 +0000 UTC m=+430.614485710 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291132 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291124362 +0000 UTC m=+430.614510111 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291150 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291141852 +0000 UTC m=+430.614527661 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291170 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291160643 +0000 UTC m=+430.614546612 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291186 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291179294 +0000 UTC m=+430.614565103 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291217 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291206574 +0000 UTC m=+430.614592674 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291232 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291226005 +0000 UTC m=+430.614611754 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291246 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291239706 +0000 UTC m=+430.614625465 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291268 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291254826 +0000 UTC m=+430.614640855 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291287 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291279377 +0000 UTC m=+430.614665126 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291302 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291294787 +0000 UTC m=+430.614680536 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291316 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291310398 +0000 UTC m=+430.614696147 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291348 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291327138 +0000 UTC m=+430.614712997 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291373 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.29136552 +0000 UTC m=+430.614751269 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291389 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.29138216 +0000 UTC m=+430.614767909 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291403 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291396281 +0000 UTC m=+430.614782040 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291419 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291411421 +0000 UTC m=+430.614797420 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291441 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291428733 +0000 UTC m=+430.614814482 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291460 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291452303 +0000 UTC m=+430.614838062 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291474 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291467424 +0000 UTC m=+430.614853173 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291490 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291483614 +0000 UTC m=+430.614869363 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291505 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291498215 +0000 UTC m=+430.614883974 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291525 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291512805 +0000 UTC m=+430.614898554 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291544 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291537316 +0000 UTC m=+430.614923075 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291559 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291552077 +0000 UTC m=+430.614937826 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291575 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291568777 +0000 UTC m=+430.614954526 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291592 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291583458 +0000 UTC m=+430.614969547 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291620 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291609008 +0000 UTC m=+430.614994857 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291642 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.291633879 +0000 UTC m=+430.615019678 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.291959 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.29166137 +0000 UTC m=+430.615047149 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.292050 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.29197615 +0000 UTC m=+430.615361939 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.292259 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: W0312 13:27:39.328171 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e19f9e8_9a37_4ca8_9790_c219750ab482.slice/crio-12a74a3477015d455fa4567a17693dfe02eb7d5d2415392f159ed0f72ae4370c WatchSource:0}: Error finding container 12a74a3477015d455fa4567a17693dfe02eb7d5d2415392f159ed0f72ae4370c: Status 404 returned error can't find the container with id 12a74a3477015d455fa4567a17693dfe02eb7d5d2415392f159ed0f72ae4370c Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.336514 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: W0312 13:27:39.362389 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51a02bbf_2d40_4f84_868a_d399ea18a846.slice/crio-144361f0fea4c7b9496c097378788c284ef01da0912479ce5fc41b5759e9a67e WatchSource:0}: Error finding container 144361f0fea4c7b9496c097378788c284ef01da0912479ce5fc41b5759e9a67e: Status 404 returned error can't find the container with id 144361f0fea4c7b9496c097378788c284ef01da0912479ce5fc41b5759e9a67e Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.366128 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.387692 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.387861 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.387972 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.388269 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388289 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388346 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.388329896 +0000 UTC m=+430.711715785 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388458 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.388471 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388476 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388499 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.388481834 +0000 UTC m=+430.711867593 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388505 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388560 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388662 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388671 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.388661427 +0000 UTC m=+430.712047296 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.388618 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388726 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.388714289 +0000 UTC m=+430.712100091 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.388777 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.388767483 +0000 UTC m=+430.712153232 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.388939 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.389007 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.389087 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.389132 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.393092 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.393110 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.393128 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.393185 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.393166048 +0000 UTC m=+430.716551927 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.393211 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.393200649 +0000 UTC m=+430.716586508 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394317 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394361 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394403 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394440 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394487 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394516 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: 
\"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.394521 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394556 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.394575 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.394559903 +0000 UTC m=+430.717945792 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394615 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394667 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394697 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394722 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394748 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394778 
4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394898 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.394946 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395012 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395067 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395115 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395153 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395270 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395335 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395385 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395487 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395534 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.395523094 +0000 UTC m=+430.718908883 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395541 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395582 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395594 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.395579607 +0000 UTC m=+430.718965566 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395622 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.395611518 +0000 UTC m=+430.718997457 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395651 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395663 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395672 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395702 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.39569298 +0000 UTC m=+430.719078859 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395747 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395783 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.395775433 +0000 UTC m=+430.719161212 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395869 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395931 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395950 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.395961 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396040 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.396029402 +0000 UTC m=+430.719415291 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396052 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396072 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396118 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396134 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396152 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object 
"openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396189 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.396176856 +0000 UTC m=+430.719562635 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396234 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396247 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396256 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396289 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.39628005 +0000 UTC m=+430.719665819 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396346 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396362 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396371 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396403 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.396393583 +0000 UTC m=+430.719779612 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396442 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396476 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.396461555 +0000 UTC m=+430.719847434 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.396806 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.396792036 +0000 UTC m=+430.720177825 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397144 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397162 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397171 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397261 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.397248981 +0000 UTC m=+430.720634978 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397171 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397291 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397301 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397302 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397331 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:40.397322391 +0000 UTC m=+430.720708332 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397389 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397367 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397422 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397451 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397465 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397475 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397534 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397545 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.397528111 +0000 UTC m=+430.720914000 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397554 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397659 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397665 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397866 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.397696556 +0000 UTC m=+430.721082335 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397905 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.397894764 +0000 UTC m=+430.721280653 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397615 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397934 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397948 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397957 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398015 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398033 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.397981175 +0000 UTC m=+430.721405216 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.397399 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398084 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.398071668 +0000 UTC m=+430.721457557 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398032 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398113 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.398101339 +0000 UTC m=+430.721487118 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398255 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.398239005 +0000 UTC m=+430.721624764 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398493 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.398515 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.395422 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: W0312 13:27:39.399065 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a23c0ee_5648_448c_b772_83dced2891ce.slice/crio-443fc346903e580e1a29e68acf866f5cd69c33fa80c4f3306d95102755eb9331 WatchSource:0}: Error finding container 443fc346903e580e1a29e68acf866f5cd69c33fa80c4f3306d95102755eb9331: Status 404 returned error can't find the container with id 443fc346903e580e1a29e68acf866f5cd69c33fa80c4f3306d95102755eb9331 Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.399279 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.398568705 +0000 UTC m=+430.721954644 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.399326 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.39931268 +0000 UTC m=+430.722698509 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.399392 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.399431 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.399461 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.399497 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.399712 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.399733 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.399743 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.399913 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.400094 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.400219 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object 
"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.400239 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.400457 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.405179 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.405249 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.405329 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405340 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405384 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405372526 +0000 UTC m=+430.728758405 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.405442 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.405501 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405538 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405521192 +0000 UTC m=+430.728906971 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405563 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405555104 +0000 UTC m=+430.728940853 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405574 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405581 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405574214 +0000 UTC m=+430.728960291 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405586 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405596 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405624 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405614333 +0000 UTC m=+430.729000212 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405645 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405639384 +0000 UTC m=+430.729025143 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405677 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.405622 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.405707 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.405700188 +0000 UTC m=+430.729086107 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.406025 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.406066 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406194 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406260 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406273 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406282 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406291 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406306 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406337 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.406326769 +0000 UTC m=+430.729712548 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.406614 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.406596879 +0000 UTC m=+430.729982768 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.409253 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.409321 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.409301668 +0000 UTC m=+430.732687557 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.409365 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.409405 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.409396591 +0000 UTC m=+430.732782560 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.446163 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.491491 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.508788 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.509101 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509126 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509211 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.509187667 +0000 UTC m=+430.832573586 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509266 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.509281 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509327 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.509311489 +0000 UTC m=+430.832697548 (durationBeforeRetry 1s). 
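Interleaved with the mount errors, every pod status patch is rejected by the apiserver because the pod.network-node-identity.openshift.io mutating webhook backing https://127.0.0.1:9743 is unreachable (connect: connection refused), so pod status cannot converge until the webhook's backing process is up. A quick TCP probe reproduces that failure mode from the node; this is a hypothetical troubleshooting snippet, not part of the log or of any OpenShift tooling.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// The endpoint the apiserver tries to call for every pod status patch.
	const webhook = "127.0.0.1:9743"
	conn, err := net.DialTimeout("tcp", webhook, 2*time.Second)
	if err != nil {
		// A refused connection here matches the "dial tcp 127.0.0.1:9743:
		// connect: connection refused" text in the status_manager records.
		fmt.Printf("webhook endpoint down: %v\n", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint reachable; the failure lies elsewhere")
}
```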
Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509385 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509402 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.509553 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509629 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.509614211 +0000 UTC m=+430.833000250 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509681 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.509723 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509732 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.509719984 +0000 UTC m=+430.833105873 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.509866 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.512424 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512450 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512476 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.512478 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512538 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.512520536 +0000 UTC m=+430.835906425 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512579 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512597 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512640 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.512627566 +0000 UTC m=+430.836013465 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.512710 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512737 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.512745 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512756 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512767 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512895 4125 configmap.go:199] Couldn't get configMap 
openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512937 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.51292767 +0000 UTC m=+430.836313449 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512947 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.512964 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.51295294 +0000 UTC m=+430.836338959 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.513099 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.51302641 +0000 UTC m=+430.836412279 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.513318 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.513364 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.51334761 +0000 UTC m=+430.836733499 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.512873 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.514139 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.514258 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.514299 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.514290021 +0000 UTC m=+430.837675910 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.514707 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.515802 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.5157919 +0000 UTC m=+430.839177699 (durationBeforeRetry 1s). 
Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.516359 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.516501 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.516725 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.516931 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.519286 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.519458 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.519752 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.520315 4125 reconciler_common.go:231] 
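The image-registry PVC failure just above is different in kind from the configmap races: MountDevice cannot even start because kubevirt.io.hostpath-provisioner has not (yet) registered itself with the kubelet's CSI plugin registry. The drivers registered on a node are reflected in its CSINode object, which can be checked with client-go as sketched below; the node name "crc" and the kubeconfig wiring are assumptions for illustration.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// The CSINode object lists every CSI driver registered on this node.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	found := false
	for _, d := range csiNode.Spec.Drivers {
		fmt.Println("registered driver:", d.Name)
		found = found || d.Name == "kubevirt.io.hostpath-provisioner"
	}
	if !found {
		fmt.Println("kubevirt.io.hostpath-provisioner not registered; PVC mounts will keep failing")
	}
}
```

Until the provisioner's node plugin registers over the kubelet plugin-registration socket, the volume manager retries MountDevice on the same backoff schedule as the other failures here.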
"operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.520597 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.520748 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.521019 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.521106 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.521125 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.521167 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.521155975 +0000 UTC m=+430.844541744 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.521227 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.521265 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:40.521251018 +0000 UTC m=+430.844636897 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521023 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521313 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521375 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521444 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521505 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521532 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521587 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521632 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521660 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521691 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521745 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521778 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.521901 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.522041 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522077 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522095 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522105 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.520929 
4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522163 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522218 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522371 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522393 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522402 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522474 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522488 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522496 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522567 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522581 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522589 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522662 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522679 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522686 4125 
projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522742 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522802 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522907 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522952 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523095 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523120 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523129 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523184 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523215 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523236 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523242 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523248 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523252 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: 
E0312 13:27:39.523260 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.522124 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.522114487 +0000 UTC m=+430.845500366 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523312 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523302296 +0000 UTC m=+430.846688055 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523333 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523323566 +0000 UTC m=+430.846709986 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523352 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523344847 +0000 UTC m=+430.846730786 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523372 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:40.523360518 +0000 UTC m=+430.846746277 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523394 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523386238 +0000 UTC m=+430.846772208 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523411 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523404759 +0000 UTC m=+430.846790508 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523425 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.52341922 +0000 UTC m=+430.846804979 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.523484 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.523650 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523730 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523766 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.52375837 +0000 UTC m=+430.847144149 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523794 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523785041 +0000 UTC m=+430.847170800 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523885 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523854223 +0000 UTC m=+430.847239993 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523907 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523899635 +0000 UTC m=+430.847285504 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.523968 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.523915685 +0000 UTC m=+430.847301444 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524068 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.52404875 +0000 UTC m=+430.847434669 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524089 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524081581 +0000 UTC m=+430.847467330 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524103 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524097621 +0000 UTC m=+430.847483500 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524123 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524115442 +0000 UTC m=+430.847501201 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524178 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524216 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524204006 +0000 UTC m=+430.847589895 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524281 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524296 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524308 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524338 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.52433102 +0000 UTC m=+430.847716799 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.520631 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524378 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524371211 +0000 UTC m=+430.847756980 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.520770 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524401 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524409 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524431 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524425473 +0000 UTC m=+430.847811242 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524476 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524487 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524496 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524544 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524522566 +0000 UTC m=+430.847908455 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.524682 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.524670301 +0000 UTC m=+430.848056180 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.528007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.534100 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.534208 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.534289 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.534405 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.534388811 +0000 UTC m=+430.857774600 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.588429 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.620581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.657142 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.658205 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.658261 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.658283 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.658336 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.658315792 +0000 UTC m=+430.981701581 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.660055 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.663458 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.663790 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.664593 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.664636 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.664648 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.665366 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.666685 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.666716 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.680106 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:40.680074696 +0000 UTC m=+431.003460465 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: E0312 13:27:39.680148 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:40.680133508 +0000 UTC m=+431.003519257 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.695264 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.724067 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.785186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.839206 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.900045 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: W0312 13:27:39.941509 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d0dcce3_d96e_48cb_9b9f_362105911589.slice/crio-57c0d5a59f8ce485ac8223e54fbaf1dbf2d569f5ae3e1b5fb25a66983e1bb4a1 WatchSource:0}: Error finding container 57c0d5a59f8ce485ac8223e54fbaf1dbf2d569f5ae3e1b5fb25a66983e1bb4a1: Status 404 returned error can't find the container with id 57c0d5a59f8ce485ac8223e54fbaf1dbf2d569f5ae3e1b5fb25a66983e1bb4a1 Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.941913 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:39 crc kubenswrapper[4125]: I0312 13:27:39.986703 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.062188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.062483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.062602 4125 util.go:30] "No sandbox for pod can be found. 
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.062780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.062955 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.063353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.063544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.063796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.063924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.064485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.064707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.074074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.074232 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.075104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.075239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.075695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.080025 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.080372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.080650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.095592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.096755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.097731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.100909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.102497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.102686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.103571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.103758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.105100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.105519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.106391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.111427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.111620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.113493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.122680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.135182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.159438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.135687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.160023 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.160196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.137613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.138012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.144971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.145430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.146121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.146774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.136972 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.165013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.167486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.168086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.168388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.168665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.168782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.168973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.183215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.183746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.184113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.184267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.184388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.184507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.184606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.184708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.184792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.208558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.222908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.223061 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.223121 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223154 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223221 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.223201842 +0000 UTC m=+432.546587711 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.217129 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.216982 4125 util.go:30] "No sandbox for pod can be found. 
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.222038 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.223503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.223637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.223933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.224152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.224261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.224377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.224477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.222595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.225414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.222642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.225574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.222763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.222854 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.225717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.279781 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" event={"ID":"2b6d14a5-ca00-40c7-af7a-051a98a24eed","Type":"ContainerStarted","Data":"f90046c12d81dae3a80e33b860ed850b8047de0d0990a3973cdabad0272322ab"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.303977 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" event={"ID":"9fb762d1-812f-43f1-9eac-68034c1ecec7","Type":"ContainerStarted","Data":"4a76d161620c158b50836fb663f7af47a7b9562b25667770e47caf9cda43e275"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.196119 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.349160 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dn27q" event={"ID":"6a23c0ee-5648-448c-b772-83dced2891ce","Type":"ContainerStarted","Data":"443fc346903e580e1a29e68acf866f5cd69c33fa80c4f3306d95102755eb9331"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.355171 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-l92hr" event={"ID":"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e","Type":"ContainerStarted","Data":"bfdf87a501625557b41a25cd553c70f920a72cca0c806e9daf420c54a2af06dc"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.359776 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"57c0d5a59f8ce485ac8223e54fbaf1dbf2d569f5ae3e1b5fb25a66983e1bb4a1"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.367845 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"144361f0fea4c7b9496c097378788c284ef01da0912479ce5fc41b5759e9a67e"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.378914 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.380183 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.385070 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.385255 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.385541 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.385736 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.385905 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.386062 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.386202 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.386313 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.386578 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.386881 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.387034 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.387144 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.387269 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.387435 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387544 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387637 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.387617501 +0000 UTC m=+432.711003540 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387695 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387725 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.387718044 +0000 UTC m=+432.711103813 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387770 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387879 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.387799717 +0000 UTC m=+432.711185486 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387932 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387960 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.387952371 +0000 UTC m=+432.711338260 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388033 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388068 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.388055635 +0000 UTC m=+432.711441404 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388128 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388190 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.388180349 +0000 UTC m=+432.711566228 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.388354 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.387479 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388606 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.388588671 +0000 UTC m=+432.711974450 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.381502 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388802 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.388792887 +0000 UTC m=+432.712178666 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.389271 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.389605 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.389701 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.390093 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.390072262 +0000 UTC m=+432.713458271 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.389719 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.390160 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.390236 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.391588 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.381224 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"12a74a3477015d455fa4567a17693dfe02eb7d5d2415392f159ed0f72ae4370c"}
Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.391861 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" event={"ID":"bf1a8b70-3856-486f-9912-a2de1d57c3fb","Type":"ContainerStarted","Data":"df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708"}
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.388888 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.392159 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.392307 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.395137 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered
13:27:40.397006 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.397180 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.397245 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.397366 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.397438 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.397505 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.397950 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.398910 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.398964 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399029 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399059 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399084 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: 
\"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399154 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399198 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399232 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399246 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399273 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399296 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.39927843 +0000 UTC m=+432.722664369 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399352 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399411 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.399399004 +0000 UTC m=+432.722784953 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399452 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399478 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399495 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399504 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399520 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399581 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.399569704 +0000 UTC m=+432.722955483 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399607 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399617 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399646 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.399632452 +0000 UTC m=+432.723018241 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399676 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399686 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399715 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399760 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.399938 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: 
\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400197 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400213 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400231 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400251 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400272 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400287 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.400277626 +0000 UTC m=+432.723663415 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400310 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400348 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400390 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.40037851 +0000 UTC m=+432.723764479 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400460 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400481 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400497 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400521 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400540 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.400526531 +0000 UTC m=+432.723912320 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400480 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400559 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.400550955 +0000 UTC m=+432.723936824 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400615 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400685 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400697 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400736 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.400727921 +0000 UTC m=+432.724113820 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400772 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.400795 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400899 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401073 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401213 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.399715 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401248 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401316 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.400856 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.40079415 +0000 UTC m=+432.724180049 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.401396 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401407 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401424 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401436 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401460 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401464 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401455535 +0000 UTC m=+432.724841434 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401485 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401478126 +0000 UTC m=+432.724863985 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401503 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:42.401496886 +0000 UTC m=+432.724882646 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401543 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401510977 +0000 UTC m=+432.724896726 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401562 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401554728 +0000 UTC m=+432.724940577 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401578 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401572299 +0000 UTC m=+432.724958058 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401595 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401587009 +0000 UTC m=+432.724972758 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401596 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401610 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.40160313 +0000 UTC m=+432.724988979 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401629 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401619972 +0000 UTC m=+432.725005959 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401644 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401637061 +0000 UTC m=+432.725022830 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401657 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401651331 +0000 UTC m=+432.725037080 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401675 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401665742 +0000 UTC m=+432.725051491 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401689 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401682852 +0000 UTC m=+432.725068711 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401710 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401697023 +0000 UTC m=+432.725082772 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401727 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401721104 +0000 UTC m=+432.725106853 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401741 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:42.401733934 +0000 UTC m=+432.725119683 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401757 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401750105 +0000 UTC m=+432.725135854 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401777 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.401766815 +0000 UTC m=+432.725152574 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402582 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402629 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.402613829 +0000 UTC m=+432.725999718 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402683 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402714 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.402705322 +0000 UTC m=+432.726091101 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402759 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402794 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.402781085 +0000 UTC m=+432.726166864 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402862 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.402852387 +0000 UTC m=+432.726238146 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402908 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.402943 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.402929281 +0000 UTC m=+432.726315060 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.403065 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.403094 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.403087186 +0000 UTC m=+432.726472965 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.403110 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.403105076 +0000 UTC m=+432.726490825 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.401426 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403158 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403198 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403235 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403266 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403340 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403375 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403423 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403458 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403511 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403547 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403583 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403610 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403641 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403701 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: 
\"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403751 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403856 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403898 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.403959 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.404046 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.404084 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404190 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404234 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.404219142 +0000 UTC m=+432.727604921 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404380 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404414 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.404405279 +0000 UTC m=+432.727791068 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404455 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404482 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.404474951 +0000 UTC m=+432.727860720 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404521 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404549 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.404542853 +0000 UTC m=+432.727928742 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404580 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404606 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.404596765 +0000 UTC m=+432.727982534 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404651 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404675 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.404668897 +0000 UTC m=+432.728054666 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.401562 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404714 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404750 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.40474225 +0000 UTC m=+432.728128139 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404767 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.40476052 +0000 UTC m=+432.728146269 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.404909 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.406773 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.406866 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.406854769 +0000 UTC m=+432.730240648 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.406920 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.406957 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.406948172 +0000 UTC m=+432.730334101 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407034 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407070 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407061685 +0000 UTC m=+432.730447454 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407110 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407137 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407130418 +0000 UTC m=+432.730516197 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407181 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407215 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407203881 +0000 UTC m=+432.730589650 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407250 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407282 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407269813 +0000 UTC m=+432.730655582 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407331 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407362 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407354876 +0000 UTC m=+432.730740645 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407397 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407429 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407421618 +0000 UTC m=+432.730807507 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407473 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407506 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.40749455 +0000 UTC m=+432.730880439 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407543 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407573 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407565682 +0000 UTC m=+432.730951561 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407611 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407645 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407636095 +0000 UTC m=+432.731021874 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407684 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.407711 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.407704847 +0000 UTC m=+432.731090626 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.410771 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.410747677 +0000 UTC m=+432.734133556 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.411949 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48c1471ee6eaa615e5b0e19686e3fafc0f687dc03625988c88b411dc682d223f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:27:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:24:26Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.415416 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.415498 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:42.415485041 +0000 UTC m=+432.738871120 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.424079 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" event={"ID":"410cf605-1970-4691-9c95-53fdc123b1f3","Type":"ContainerStarted","Data":"bdd93edd5a40c60463f421cca39992772d412dc8c0f365a389675a100c924be5"} Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.458893 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.510520 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.511477 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.511896 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512044 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512135 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512193 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512231 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512273 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512307 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 
12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512334 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512357 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512412 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.512415 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.512459 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512466 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.512476 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512498 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.512569 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:42.512548946 +0000 UTC m=+432.835934784 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512624 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.512650 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.512670 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512674 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512712 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512744 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512779 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512860 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512905 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.512950 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513063 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513150 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513169 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513196 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.513184233 +0000 UTC m=+432.836570002 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513232 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513281 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.513264216 +0000 UTC m=+432.836650115 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513326 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513341 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513347 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513397 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513409 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513418 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513424 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513459 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.513448021 +0000 UTC m=+432.836833910 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513502 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513619 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513641 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513667 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513679 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.513665108 +0000 UTC m=+432.837050987 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513723 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513732 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513750 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.513743411 +0000 UTC m=+432.837129350 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.513781 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513798 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513865 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513880 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513908 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.513900436 +0000 UTC m=+432.837286215 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513964 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513978 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514058 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514048971 +0000 UTC m=+432.837434870 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.514062 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514110 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514149 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.514111 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514153 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514136814 +0000 UTC m=+432.837522593 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514205 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514220 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514211 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514202166 +0000 UTC m=+432.837587915 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514255 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.514273 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.514318 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514320 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514334 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514348 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514379 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514371031 +0000 UTC m=+432.837756910 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.514347 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514393 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514422 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514414453 +0000 UTC m=+432.837800332 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514454 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514466 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514474 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514483 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514499 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514492535 +0000 UTC m=+432.837878414 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514515 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514508556 +0000 UTC m=+432.837894435 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514539 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514552 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514561 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514590 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514579508 +0000 UTC m=+432.837965387 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514597 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514616 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514627 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514661 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514652031 +0000 UTC m=+432.838037810 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514667 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514701 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514689131 +0000 UTC m=+432.838075020 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514732 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514746 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514772 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.514765594 +0000 UTC m=+432.838151373 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514779 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514949 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.51493529 +0000 UTC m=+432.838321189 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.514456 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.513350 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515151 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.515142937 +0000 UTC m=+432.838528716 (durationBeforeRetry 2s). 
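The paired failures above all follow one pattern: every kube-api-access-* volume is a projected volume whose sources include the namespace's kube-root-ca.crt and openshift-service-ca.crt configmaps (plus a service-account token), each source is resolved independently, and the per-source failures are aggregated into the bracketed list that projected.go logs. A minimal Go sketch of that aggregation, assuming an object cache that rejects every lookup the way these records show; this is an illustration, not kubelet's actual implementation (kubelet renders the aggregate as a bracketed, comma-separated list, while errors.Join joins with newlines):

package main

import (
	"errors"
	"fmt"
)

// lookup stands in for the kubelet's object cache; here every object is
// "not registered", matching the records above.
func lookup(namespace, name string) error {
	return fmt.Errorf("object %q/%q not registered", namespace, name)
}

// prepareProjected resolves each source of a projected volume and
// aggregates the failures into one error per mount attempt (Go 1.20+).
func prepareProjected(namespace string, sources []string) error {
	var errs []error
	for _, src := range sources {
		if err := lookup(namespace, src); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

func main() {
	err := prepareProjected("openshift-machine-api",
		[]string{"kube-root-ca.crt", "openshift-service-ca.crt"})
	fmt.Println(err)
}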
Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515193 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.515184928 +0000 UTC m=+432.838570677 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514276 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515210 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515243 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.5152358 +0000 UTC m=+432.838621579 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514624 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515300 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.515292062 +0000 UTC m=+432.838677841 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515327 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.515318033 +0000 UTC m=+432.838703782 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.514748 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515342 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.515373 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.515361854 +0000 UTC m=+432.838747623 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.521268 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.521734 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.521722832 +0000 UTC m=+432.845108621 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.521890 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.521906 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.521914 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.521952 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.52194054 +0000 UTC m=+432.845326309 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522008 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522048 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522110 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522151 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522172 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522237 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522291 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522266291 +0000 UTC m=+432.845652190 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522338 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522354 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522363 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522404 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522393525 +0000 UTC m=+432.845779314 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522438 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522481 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.522522 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522555 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.52254507 +0000 UTC m=+432.845930969 (durationBeforeRetry 2s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522571 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522607 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522597351 +0000 UTC m=+432.845983130 (durationBeforeRetry 2s). 
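The MountDevice failure for pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 above is a different variant of the same cold-start problem: the kubelet resolves a CSI driver by name from the set of plugins that have announced themselves over its registration socket, and kubevirt.io.hostpath-provisioner has not re-registered yet. A sketch of that lookup with hypothetical data structures (the real kubelet keeps this state in its plugin manager); the error text mirrors the record above:

package main

import "fmt"

type csiClient struct{ driver string }

// registered stands in for the kubelet's view of CSI plugins that have
// announced themselves; it is empty here because, as in the log,
// nothing has re-registered yet after the restart.
var registered = map[string]*csiClient{}

func newCsiDriverClient(driverName string) (*csiClient, error) {
	if c, ok := registered[driverName]; ok {
		return c, nil
	}
	return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
}

func main() {
	if _, err := newCsiDriverClient("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("MountDevice failed:", err)
	}
}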
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522189 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522661 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522675 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522666714 +0000 UTC m=+432.846052493 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522679 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522696 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522727 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522741 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522751 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522761 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522728 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522720395 +0000 UTC m=+432.846106274 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522857 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522796008 +0000 UTC m=+432.846181757 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522881 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.52287315 +0000 UTC m=+432.846259030 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522885 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522900 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522891312 +0000 UTC m=+432.846277321 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522902 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522928 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522940 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522961 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522954264 +0000 UTC m=+432.846340043 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.522978 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.522972364 +0000 UTC m=+432.846358223 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523023 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523062 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.523053016 +0000 UTC m=+432.846438795 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523118 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523134 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523143 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523178 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.523162369 +0000 UTC m=+432.846548148 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523245 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523262 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523273 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523299 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.523292524 +0000 UTC m=+432.846678303 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523343 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523376 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.523367427 +0000 UTC m=+432.846753206 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523417 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523446 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.523435019 +0000 UTC m=+432.846820788 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523669 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.523706 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.523698438 +0000 UTC m=+432.847084217 (durationBeforeRetry 2s). 
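Every failed attempt above is re-queued with a durationBeforeRetry of 2s and a "No retries permitted until" stamp two seconds out. That pacing comes from per-volume exponential backoff in nestedpendingoperations, which grows the delay on repeated failures up to a cap. A sketch of the shape of that policy; the constants are assumptions for illustration, not kubelet's actual values:

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the wait between attempts up to a cap.
func nextDelay(prev time.Duration) time.Duration {
	const (
		initial = 500 * time.Millisecond // assumed starting delay
		cap     = 2 * time.Minute        // assumed upper bound
	)
	if prev == 0 {
		return initial
	}
	if next := prev * 2; next < cap {
		return next
	}
	return cap
}

func main() {
	var d time.Duration
	for attempt := 1; attempt <= 5; attempt++ {
		d = nextDelay(d)
		fmt.Printf("attempt %d: durationBeforeRetry %v\n", attempt, d)
	}
}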
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.556074 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.585119 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.607189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.627282 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.627438 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.627489 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627507 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.627531 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.627567 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627582 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.627564251 +0000 UTC m=+432.950950140 (durationBeforeRetry 2s). 
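The status_manager records above show the second failure domain in this excerpt: status patches for these pods are routed through the pod.network-node-identity.openshift.io mutating webhook on https://127.0.0.1:9743, and with nothing listening there every patch fails with "connection refused", so the same unready container statuses keep being re-reported. The refusal is reproducible with a plain HTTP client; the endpoint below is taken from the log, and the ten-second client timeout mirrors the ?timeout=10s in the webhook URL:

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	client := &http.Client{Timeout: 10 * time.Second} // mirrors ?timeout=10s
	_, err := client.Post("https://127.0.0.1:9743/pod?timeout=10s",
		"application/json", strings.NewReader("{}"))
	// With nothing listening: dial tcp 127.0.0.1:9743: connect: connection refused
	fmt.Println(err)
}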
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627662 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627711 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627729 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627745 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627966 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627972 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.628016 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.627731 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.627714566 +0000 UTC m=+432.951100465 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.628090 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.628054807 +0000 UTC m=+432.951440676 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628138 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628217 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628281 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628314 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628376 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628421 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628465 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628496 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod 
\"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628560 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628593 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628680 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628715 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.628765 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.629146 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.629970 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.630030 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.630155 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.630217 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.630281 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.630344 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630485 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630503 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630520 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630562 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.630553029 +0000 UTC m=+432.953938918 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.628028 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630616 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.630607401 +0000 UTC m=+432.953993280 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630690 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630706 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630718 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630748 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.630740665 +0000 UTC m=+432.954126444 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630805 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630880 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630889 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630921 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.630912901 +0000 UTC m=+432.954298670 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.630978 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631074 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631086 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631113 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631105718 +0000 UTC m=+432.954491497 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631169 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631183 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631192 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631226 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631218162 +0000 UTC m=+432.954603941 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631264 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631294 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631286364 +0000 UTC m=+432.954672143 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631352 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631401 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631389347 +0000 UTC m=+432.954775206 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631446 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631478 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.63146752 +0000 UTC m=+432.954853289 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631510 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631540 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631524941 +0000 UTC m=+432.954914281 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631596 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631611 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631619 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631651 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631640625 +0000 UTC m=+432.955026704 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631687 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631720 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631710437 +0000 UTC m=+432.955096446 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631773 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631789 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631801 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.631983 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.631970706 +0000 UTC m=+432.955356955 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632089 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632101 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632133 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.632120241 +0000 UTC m=+432.955506020 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632173 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632204 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.632196433 +0000 UTC m=+432.955582212 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632249 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632300 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.632287176 +0000 UTC m=+432.955673315 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632369 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632384 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632392 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632428 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.63241969 +0000 UTC m=+432.955805459 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632490 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632521 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.632513134 +0000 UTC m=+432.955898913 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632572 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632628 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:42.632618777 +0000 UTC m=+432.956004857 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632666 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632709 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.63269746 +0000 UTC m=+432.956083279 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632777 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632792 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.632802 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.634192 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.634221 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.634292 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.634275772 +0000 UTC m=+432.957661551 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.642062 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.642049696 +0000 UTC m=+432.965435635 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.642320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.694914 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.735775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.739504 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.739641 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.740189 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741550 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741629 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741646 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741705 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.741686351 +0000 UTC m=+433.065072230 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741769 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741788 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.741799 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.745136 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.745349 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.745370 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.745486 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.745461655 +0000 UTC m=+433.068847544 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: E0312 13:27:40.764045 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:42.764019934 +0000 UTC m=+433.087406063 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.786610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.846118 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9572cbf27a025e52f8350ba1f90df2f73ac013d88644e34f555a7ae71822234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:23:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:07Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.888230 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.908104 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-approver-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-approver-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.940768 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:40 crc kubenswrapper[4125]: I0312 13:27:40.979625 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.010313 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e6
25ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.025070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.025223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:41 crc kubenswrapper[4125]: E0312 13:27:41.025264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.025331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:41 crc kubenswrapper[4125]: E0312 13:27:41.025451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.025547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:41 crc kubenswrapper[4125]: E0312 13:27:41.025627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.025665 4125 util.go:30] "No sandbox for pod can be found. 
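The burst of "Error syncing pod, skipping" entries above all share one cause: the container runtime reports NetworkReady=false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/, while the OVN-Kubernetes pods that would write one are themselves still down. A minimal Go sketch of the same readiness test, assuming only that a .conf/.conflist/.json file in that directory counts as a usable network config:

// cnicheck.go - reproduces the condition behind "No CNI configuration
// file in /etc/kubernetes/cni/net.d/": the runtime stays
// NetworkPluginNotReady until a config file shows up. The directory
// comes from the log; the extension list mirrors the usual libcni
// defaults and is an assumption here.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", e.Name())
			return
		}
	}
	fmt.Println("no CNI configuration file; network plugin not ready")
}

Once ovnkube-node manages to start and writes its config into that directory, the NetworkPluginNotReady errors should stop and sandbox creation can proceed.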
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:41 crc kubenswrapper[4125]: E0312 13:27:41.025743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:41 crc kubenswrapper[4125]: E0312 13:27:41.025982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.045574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.089210 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.124588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.163090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.220008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
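The CrashLoopBackOff on kube-apiserver-check-endpoints further up is not a webhook problem: its own termination message shows every API call failing with "x509: certificate has expired or is not yet valid", the serving certificate's NotAfter (2025-06-26T12:46:59Z) being long past the node clock (2026-03-12), which is consistent with a cluster resumed months after its certificates lapsed. The same validity-window test in Go (the certificate path is a hypothetical placeholder):

// certcheck.go - performs the NotBefore/NotAfter check that the
// check-endpoints container is failing, on any PEM certificate.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/tmp/apiserver.crt") // hypothetical path
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		// Matches the log: "certificate has expired or is not yet
		// valid: current time ... is after ..."
		fmt.Printf("certificate invalid: current time %s outside [%s, %s]\n",
			now.Format(time.RFC3339),
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339))
		return
	}
	fmt.Println("certificate valid until", cert.NotAfter.Format(time.RFC3339))
}

The cluster-policy-controller entry just below fails the identical check against a certificate that expired 2025-06-26T12:47:18Z.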
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.270673 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.301042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.352069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.415059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
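The back-off durations quoted above ("back-off 2m40s", "back-off 5m0s") sit on the kubelet's exponential restart curve: by default it starts at 10s, doubles per failed restart, and caps at 5m. A sketch of that sequence follows; the constants mirror the usual kubelet defaults and are an assumption here, and the kubelet's actual restart bookkeeping (backoff key resets and so on) differs slightly:

// backoff.go - prints the default kubelet container-restart backoff
// ladder; 2m40s and 5m0s from the log both lie on it.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		base     = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := base
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: back-off %s\n", restart, d)
		d *= 2
		if d > maxDelay {
			d = maxDelay
		}
	}
}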
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.451389 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec"} Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.486027 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerStarted","Data":"33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86"} Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.491455 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
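One detail worth noting from the cluster-policy-controller termination message further up: before exec'ing the real binary, its wrapper loops on ss -Htanop '( sport = 10357 )' under timeout 3m, waiting for the previous instance to release port 10357. The equivalent wait in Go, binding the port instead of parsing ss output (port taken from the log; the 3m deadline mirrors the timeout wrapper and the 1s retry interval is an assumption):

// portwait.go - wait until nothing listens on :10357, or give up
// after three minutes, before starting a replacement process.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	deadline := time.Now().Add(3 * time.Minute)
	for time.Now().Before(deadline) {
		ln, err := net.Listen("tcp", ":10357")
		if err == nil {
			ln.Close()
			fmt.Println("port 10357 free; safe to start")
			return
		}
		fmt.Println("port 10357 still busy:", err)
		time.Sleep(time.Second)
	}
	fmt.Println("gave up after 3m; port 10357 never freed")
}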
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.502520 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23"} Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.565460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.603090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.625361 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
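Note the circularity in the entry above: network-node-identity-7xghp, whose name matches the failing webhook pod.network-node-identity.openshift.io, cannot get its own status update past that webhook while its webhook container sits in ContainerCreating, so the node cannot report the very failure that is blocking it. Each of these entries carries the attempted patch as an escaped JSON string; a small Go sketch for pulling the per-container waiting reasons out of such a fragment (the literal below is a shortened stand-in for the real patches):

// patchdecode.go - decode a status-patch fragment like the ones
// embedded in the "Failed to update status" entries and list each
// container's state and reason.
package main

import (
	"encoding/json"
	"fmt"
)

type containerStatus struct {
	Name  string `json:"name"`
	State map[string]struct {
		Reason string `json:"reason"`
	} `json:"state"`
}

func main() {
	// Shortened sample modeled on the entries above.
	patch := `{"status":{"containerStatuses":[` +
		`{"name":"webhook","state":{"waiting":{"reason":"ContainerCreating"}}},` +
		`{"name":"approver","state":{"waiting":{"reason":"ContainerCreating"}}}]}}`

	var p struct {
		Status struct {
			ContainerStatuses []containerStatus `json:"containerStatuses"`
		} `json:"status"`
	}
	if err := json.Unmarshal([]byte(patch), &p); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	for _, cs := range p.Status.ContainerStatuses {
		for state, detail := range cs.State {
			fmt.Printf("%s: %s (%s)\n", cs.Name, state, detail.Reason)
		}
	}
}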
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.661453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] 
sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.702980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.733129 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.777477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.823610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.853129 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.885396 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.890554 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.890654 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.893618 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.921528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.958666 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:41 crc kubenswrapper[4125]: I0312 13:27:41.984691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.028157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029713 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.029732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029851 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029868 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.029941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.029985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.030158 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030282 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.030289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.030432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.030511 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.030697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030737 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.030912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.030963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.031024 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.031133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.031138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.031255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.031262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.031357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.031360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.031494 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.031496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.031648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.031710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.031766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.032281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.032415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.032543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.032594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.032692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.032783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.032951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.033031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.033086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.033193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.033262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.033372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.033418 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.033938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.034255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.034454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.034570 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034910 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.034984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.035208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.035380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.035447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.035521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.035569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.035612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.035680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.035762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.035952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.036052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.036151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.036229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.043795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.044503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.059686 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.105190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48c1471ee6eaa615e5b0e19686e3fafc0f687dc03625988c88b411dc682d223f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:27:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:24:26Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.140296 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.166168 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.178033 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.200922 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.236546 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.236737 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.236873 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.236783994 +0000 UTC m=+436.560169853 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.251889 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.274937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.315589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.355592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.393214 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.412098 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440390 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440455 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440544 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440579 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440645 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440684 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440710 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440742 4125 reconciler_common.go:231] "operationExecutor.MountVolume 
started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.440772 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441134 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441190 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441223 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441199317 +0000 UTC m=+436.764585346 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441284 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441258522 +0000 UTC m=+436.764644301 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441286 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441330 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441320051 +0000 UTC m=+436.764705830 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441327 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441353 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441384 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441398 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441378972 +0000 UTC m=+436.764764861 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441435 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441423924 +0000 UTC m=+436.764809743 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441438 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441452 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441444584 +0000 UTC m=+436.764830333 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441470 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:46.441462159 +0000 UTC m=+436.764848058 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441707 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.441744 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.441735428 +0000 UTC m=+436.765121297 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442027 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442062 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442111 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442146 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442192 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442233 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" 
(UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442268 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.442296 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442425 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442473 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442514 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442565 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442584 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442599 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442638 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442773 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442855 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.44280448 +0000 UTC m=+436.766226590 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442856 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.442918 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.442897576 +0000 UTC m=+436.766283355 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443147 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443131484 +0000 UTC m=+436.766517483 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443179 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443165425 +0000 UTC m=+436.766551284 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443195 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443187985 +0000 UTC m=+436.766573744 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443209 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:46.443202636 +0000 UTC m=+436.766588395 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443226 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443219696 +0000 UTC m=+436.766605555 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443226 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443250 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443241757 +0000 UTC m=+436.766627616 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443269 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443261668 +0000 UTC m=+436.766647537 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.443292 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.443279518 +0000 UTC m=+436.766665547 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.447609 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.448066 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448145 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448182 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448250 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448308 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448342 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448378 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: 
\"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448405 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448549 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448576 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448644 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448682 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448742 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448785 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.448866 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc 
kubenswrapper[4125]: I0312 13:27:42.448902 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452102 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452205 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452267 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452342 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452412 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452474 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452535 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452598 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452664 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452726 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.452796 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.453064 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.453144 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:42 crc 
kubenswrapper[4125]: E0312 13:27:42.453479 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.457720 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.457702553 +0000 UTC m=+436.781088432 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.457732 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.457853 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.457768 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.457757124 +0000 UTC m=+436.781142993 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.458072 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.458114 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458258 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458460 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:46.458438431 +0000 UTC m=+436.781824210 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458495 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458481183 +0000 UTC m=+436.781866932 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458515 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458508334 +0000 UTC m=+436.781894083 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458531 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458523614 +0000 UTC m=+436.781909373 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458552 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458539615 +0000 UTC m=+436.781925364 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458573 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458565935 +0000 UTC m=+436.781951684 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458590 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458581426 +0000 UTC m=+436.781967185 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458605 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458598236 +0000 UTC m=+436.781983985 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458630 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458612537 +0000 UTC m=+436.781998286 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458648 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458641429 +0000 UTC m=+436.782027178 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.45866428 +0000 UTC m=+436.782050029 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458687 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.45868 +0000 UTC m=+436.782066183 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458707 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458698991 +0000 UTC m=+436.782084740 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.458721 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458731 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458723081 +0000 UTC m=+436.782108940 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458750 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458744632 +0000 UTC m=+436.782130381 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458873 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.458970 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.458906467 +0000 UTC m=+436.782292516 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459106 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459141 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459222 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459285 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459339 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459476 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459570 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459611 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459661 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459691 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.459267 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459738 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.459753 4125 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.459741669 +0000 UTC m=+436.783127658 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.459782 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.459900 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.459923 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.459937 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.459976 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.459966711 +0000 UTC m=+436.783352490 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.460040 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.460087 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460192 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460258 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.460238832 +0000 UTC m=+436.783624741 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460285 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460321 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.460312498 +0000 UTC m=+436.783698232 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460339 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460355 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460368 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460400 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460417 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.460409158 +0000 UTC m=+436.783795017 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460433 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.460425337 +0000 UTC m=+436.783811226 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460467 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460499 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.4604915 +0000 UTC m=+436.783877279 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460500 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460538 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.46053014 +0000 UTC m=+436.783915899 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460543 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460573 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.460566803 +0000 UTC m=+436.783952582 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460624 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460884 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.460869913 +0000 UTC m=+436.784255932 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460884 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461043 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461059 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460614 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461097 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.461087758 +0000 UTC m=+436.784473537 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.460717 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461119 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.461109341 +0000 UTC m=+436.784495230 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461141 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.46113315 +0000 UTC m=+436.784519061 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461281 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.461318 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.461309506 +0000 UTC m=+436.784695285 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.462112 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.462193 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.462173119 +0000 UTC m=+436.785558998 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462301 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462334 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462371 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462410 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462452 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462484 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462529 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462571 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: 
\"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462676 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462716 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462750 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.462982 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.463084 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.463130 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.463186 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463301 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463343 4125 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.463331667 +0000 UTC m=+436.786717546 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463573 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463608 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.463598966 +0000 UTC m=+436.786984845 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463653 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463687 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.463675619 +0000 UTC m=+436.787061698 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463729 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463762 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.463755401 +0000 UTC m=+436.787141170 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463880 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463926 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.463917946 +0000 UTC m=+436.787303725 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.463975 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464133 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464179 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464169231 +0000 UTC m=+436.787555020 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464238 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464248 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464269 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464283 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464287 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464274798 +0000 UTC m=+436.787660687 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464314 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464306166 +0000 UTC m=+436.787692055 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464368 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464370 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464393 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464406 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464410 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464401799 +0000 UTC m=+436.787787578 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464443 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464432823 +0000 UTC m=+436.787818602 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464461 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464502 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464485911 +0000 UTC m=+436.787871811 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464510 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464540 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464532987 +0000 UTC m=+436.787918756 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464552 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464583 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464575814 +0000 UTC m=+436.787961593 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464593 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464628 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.46462028 +0000 UTC m=+436.788006169 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.464672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.464663981 +0000 UTC m=+436.788049850 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.484334 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.508601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.534427 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b3782
7a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.559687 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72"} Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.584669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.584669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.587194 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.587594 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.587873 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.588974 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.589043 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.589061 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.587648 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98"}
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.592722 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" event={"ID":"410cf605-1970-4691-9c95-53fdc123b1f3","Type":"ContainerStarted","Data":"7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52"}
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.593240 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.593451 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.593565 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.593529491 +0000 UTC m=+436.916915240 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.593701 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.593682515 +0000 UTC m=+436.917068615 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.593745 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.593945 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.594189 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.594168239 +0000 UTC m=+436.917554218 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.594273 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.594295 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.594311 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.594352 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.594333724 +0000 UTC m=+436.917719503 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.595359 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.595423 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.595404619 +0000 UTC m=+436.918790408 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.596260 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.596571 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.599332 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.599308418 +0000 UTC m=+436.922694207 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.599556 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630"}
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.597475 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.602918 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.603456 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.603595 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.603730 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.604137 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.604326 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.607606 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.608187 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.608310 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.608427 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.608564 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.609134 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612461 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612539 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612563 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612680 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612704 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612851 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612964 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612985 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.613088 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.613231 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.613252 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.613263 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.612269 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.606117 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.614162 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.614146135 +0000 UTC m=+436.937531924 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.606155 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.614226 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.614213117 +0000 UTC m=+436.937598896 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.606191 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.614248 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.614287 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.614279109 +0000 UTC m=+436.937664888 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.605749 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.614322 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.614338 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.615433 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.615609 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.606212 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.612372 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.605692 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.615982 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.616423 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.616520 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.616028 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.615979433 +0000 UTC m=+436.939365212 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.616164 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.618569 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.618676 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.618907 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.618892061 +0000 UTC m=+436.942278050 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.619232 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.619212592 +0000 UTC m=+436.942598341 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.619467 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.61945052 +0000 UTC m=+436.942836399 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.619601 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.619585575 +0000 UTC m=+436.942971334 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.619717 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.619694748 +0000 UTC m=+436.943080507 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.619961 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.619948846 +0000 UTC m=+436.943334715 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.620191 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.620734 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.620711909 +0000 UTC m=+436.944097778 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.620949 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.620931116 +0000 UTC m=+436.944316885 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.620973 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.620963597 +0000 UTC m=+436.944349346 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621144 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621224 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621335 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621502 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621660 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621731 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621784 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621937 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.621981 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622082 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622125 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622180 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622221 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622263 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622324 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622379 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622410 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622460 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622530 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622595 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622756 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: 
\"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622791 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.622891 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623216 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623264 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.623252262 +0000 UTC m=+436.946638041 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623354 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623404 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.623388847 +0000 UTC m=+436.946774736 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623486 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623505 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623519 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623568 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.623556642 +0000 UTC m=+436.946942703 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623643 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623689 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.623679019 +0000 UTC m=+436.947064865 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623756 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623771 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623779 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623890 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.623879112 +0000 UTC m=+436.947265051 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623965 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.623984 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624122 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.6241115 +0000 UTC m=+436.947497279 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624193 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624208 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624218 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624254 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.624244404 +0000 UTC m=+436.947630453 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624307 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624355 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.62434474 +0000 UTC m=+436.947730646 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624423 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624438 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624453 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624490 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.624478102 +0000 UTC m=+436.947863881 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624553 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624575 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624585 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624616 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.624608737 +0000 UTC m=+436.947994506 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624671 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.624703 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.62469646 +0000 UTC m=+436.948082349 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.625501 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.625609 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.625595591 +0000 UTC m=+436.948981370 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.625682 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.625726 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.625711695 +0000 UTC m=+436.949097463 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.625778 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.625960 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:46.625947832 +0000 UTC m=+436.949333811 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626090 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626139 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.626128368 +0000 UTC m=+436.949514267 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626220 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626283 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.626263622 +0000 UTC m=+436.949650161 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626351 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626393 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.626383637 +0000 UTC m=+436.949769416 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626700 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.626685297 +0000 UTC m=+436.950071186 (durationBeforeRetry 4s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626770 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.626865 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.6268028 +0000 UTC m=+436.950188579 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.627131 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" event={"ID":"9fb762d1-812f-43f1-9eac-68034c1ecec7","Type":"ContainerStarted","Data":"9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59"} Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.712402 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dn27q" event={"ID":"6a23c0ee-5648-448c-b772-83dced2891ce","Type":"ContainerStarted","Data":"3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c"} Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.627279 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712493 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712511 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object 
"openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712600 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.71257364 +0000 UTC m=+437.035959429 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.627373 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712644 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712655 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712699 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.712678444 +0000 UTC m=+437.036064213 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.627450 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712739 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712752 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712781 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:46.712774037 +0000 UTC m=+437.036159806 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.633410 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712850 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712865 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712893 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.71288566 +0000 UTC m=+437.036271539 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.635347 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.712946 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.712937962 +0000 UTC m=+437.036323731 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.695300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.725340 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.726193 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.726266 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.726252999 +0000 UTC m=+437.049638888 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.727162 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.727968 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.728222 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.728315 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.728303457 +0000 UTC m=+437.051689426 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.728500 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.729566 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.729658 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.729639581 +0000 UTC m=+437.053025520 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.729511 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.731214 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.731320 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.732075 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.732047504 +0000 UTC m=+437.055433453 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.732165 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.732316 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733189 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733275 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733470 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733511 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733559 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733610 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733635 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733694 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733753 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733858 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733897 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: 
\"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.733948 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734057 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734089 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734121 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734170 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734203 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734288 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734316 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.734366 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.732363 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3"} Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.735691 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.735709 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.735719 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.735766 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.735750775 +0000 UTC m=+437.059136544 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736178 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736201 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736211 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736245 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:46.736231301 +0000 UTC m=+437.059617080 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736443 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736482 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.736473209 +0000 UTC m=+437.059858978 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736540 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736554 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736563 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736588 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.736580353 +0000 UTC m=+437.059966252 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736647 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736696 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736710 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736743 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.736734528 +0000 UTC m=+437.060120417 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736801 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736898 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736906 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.736940 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.736925464 +0000 UTC m=+437.060311403 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.738581 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.738626 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.738611579 +0000 UTC m=+437.061997478 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.738689 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.738703 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.738712 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.738870 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.738731203 +0000 UTC m=+437.062116992 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739057 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739075 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739084 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739214 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.739204139 +0000 UTC m=+437.062590048 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739284 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739298 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739306 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739336 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.739325153 +0000 UTC m=+437.062710932 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739487 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739503 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739516 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739550 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.73953732 +0000 UTC m=+437.062923219 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739596 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739628 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.739619472 +0000 UTC m=+437.063005371 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739897 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739936 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.739927182 +0000 UTC m=+437.063313071 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.739973 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740036 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740026685 +0000 UTC m=+437.063412454 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740080 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740114 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740105738 +0000 UTC m=+437.063491517 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740173 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740198 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740209 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740234 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740227472 +0000 UTC m=+437.063613361 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740277 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740311 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740303245 +0000 UTC m=+437.063689024 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740370 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740384 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740400 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740432 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740424089 +0000 UTC m=+437.063809868 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740486 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740499 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740526 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740518182 +0000 UTC m=+437.063903961 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740577 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.740612 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.740603775 +0000 UTC m=+437.063989544 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.744403 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.744385059 +0000 UTC m=+437.067770838 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.745201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.757542 4125 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155" exitCode=0 Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.760019 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155"} Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.794574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-approver-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-approver-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.837644 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.837893 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.837933 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.837948 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838034 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.83801495 +0000 UTC m=+437.161400969 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.837903 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838056 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838086 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838099 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838164 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.838143316 +0000 UTC m=+437.161529205 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.838411 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838736 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838752 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838761 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: E0312 13:27:42.838799 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:46.838787678 +0000 UTC m=+437.162173457 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.972559 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:42 crc kubenswrapper[4125]: I0312 13:27:42.972618 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.025568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.025716 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:43 crc kubenswrapper[4125]: E0312 13:27:43.026261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.026441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:43 crc kubenswrapper[4125]: E0312 13:27:43.026771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.026784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:43 crc kubenswrapper[4125]: E0312 13:27:43.027481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.027658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:43 crc kubenswrapper[4125]: E0312 13:27:43.027874 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:43 crc kubenswrapper[4125]: E0312 13:27:43.028154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.309307 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.358605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.584657 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.692442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.739389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.777622 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03"} Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.786984 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-l92hr" event={"ID":"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e","Type":"ContainerStarted","Data":"52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874"} Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.828778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.902235 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.902380 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:43 crc kubenswrapper[4125]: I0312 13:27:43.997122 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator 
cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.030953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.031353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.031527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.031794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.031987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.032027 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.032273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.032398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.032556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.032594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.032651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.032677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.032723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.032770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.032802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.037964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.032914 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.032952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033136 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.033142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.038952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.039942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040073 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040557 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.033468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.040679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.033494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.033607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.033686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.041393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.033734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.041515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.033916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.034238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.061688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:27:44 crc kubenswrapper[4125]: E0312 13:27:44.064202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.181090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.242230 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.276424 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.344673 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.388573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.420157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.456351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.483110 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.510671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.537215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] 
sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.572494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.602921 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.638344 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.671482 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.715540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.741124 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.784526 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.792719 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" event={"ID":"410cf605-1970-4691-9c95-53fdc123b1f3","Type":"ContainerStarted","Data":"cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51"} Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.800404 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36"} Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.804394 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086"} Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.809875 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770"} Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.828083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.857966 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\"
,\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.893050 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:44 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:44 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.893137 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.893285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.916679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-version-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-version-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:44 crc kubenswrapper[4125]: I0312 13:27:44.997189 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.025217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:45 crc kubenswrapper[4125]: E0312 13:27:45.025477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.025533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:45 crc kubenswrapper[4125]: E0312 13:27:45.025627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.025671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:45 crc kubenswrapper[4125]: E0312 13:27:45.025748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.025794 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:45 crc kubenswrapper[4125]: E0312 13:27:45.026072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.026131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:45 crc kubenswrapper[4125]: E0312 13:27:45.026223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.028201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.059107 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.075542 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.099423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.118648 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.191719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.220798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.278177 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.341474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.380359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.463081 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.498223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.538401 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.560573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.601393 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.629595 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.664678 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.698670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.732531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.791387 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.820650 4125 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72" exitCode=0 Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.820975 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72"} Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.839556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.864921 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.890343 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.890485 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.912953 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.942773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:45 crc kubenswrapper[4125]: I0312 13:27:45.996368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.030283 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.033482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.033669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.033733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.034141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.034327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034381 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.034468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.034636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.034934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034990 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.035163 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.034727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.035477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.035611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.035779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.035899 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.036277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.036315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.036329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.036329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.036428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.036437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.036582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.036665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.036703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.036755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.036870 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.036923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037214 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037428 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.037787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.037971 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.038076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.038127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.038190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.038276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.038326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.038445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.038539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.038582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.038655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.038752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.047749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.048268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.048359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.048568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.048654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.048730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.049795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.062758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.121566 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.187855 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.210610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.248536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
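The "Failed to update status for pod" records above all end the same way: the API server cannot deliver the pod.network-node-identity.openshift.io admission webhook call because nothing is listening on 127.0.0.1:9743. A hedged diagnostic sketch that probes the same endpoint from the affected node follows; TLS verification is deliberately skipped because only reachability matters here, and the URL is copied from the log.

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// The endpoint every failing status patch in this log tries to reach.
	const webhookURL = "https://127.0.0.1:9743/pod?timeout=10s"
	client := &http.Client{
		Timeout: 10 * time.Second, // mirrors the ?timeout=10s in the log
		Transport: &http.Transport{
			// We only care whether something answers, not who it is.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Post(webhookURL, "application/json", nil)
	if err != nil {
		// "connection refused" here reproduces the failure in the log.
		fmt.Printf("webhook unreachable: %v\n", err)
		return
	}
	defer resp.Body.Close()
	fmt.Printf("webhook answered with HTTP %d\n", resp.StatusCode)
}

Any answer at all (even an HTTP error) would indicate the listener is back, at which point these status patches should start succeeding.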
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.292989 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.293240 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.293528 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.293498498 +0000 UTC m=+444.616884447 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.308571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
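Each nestedpendingoperations record schedules the next mount attempt with "No retries permitted until ... (durationBeforeRetry 8s)", an exponential backoff between failed volume operations. The sketch below reproduces that observable pattern under assumed parameters (500ms initial delay doubling to a 2-minute cap); it mimics the behavior visible in the log and is not the kubelet's actual backoff implementation.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed starting delay and cap; the 8s steps in the log would be one
	// point on such a doubling sequence (500ms, 1s, 2s, 4s, 8s, ...).
	delay := 500 * time.Millisecond
	const maxDelay = 2 * time.Minute
	now := time.Now()
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: no retries permitted until %s (durationBeforeRetry %v)\n",
			attempt, now.Add(delay).Format(time.RFC3339), delay)
		delay *= 2 // double the wait after every failure
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}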
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.308571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.345425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.442110 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.485419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502128 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502182 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502212 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502248 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502273 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502297 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502327 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502409 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502451 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502706 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502747 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502794 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502940 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.502978 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503001 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503128 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503185 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503229 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503284 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503312 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503356 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503398 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503425 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503511 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503567 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503674 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503702 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503766 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.503797 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.504624 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505002 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.50478214 +0000 UTC m=+444.828168189 (durationBeforeRetry 8s). 
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505002 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.50478214 +0000 UTC m=+444.828168189 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505239 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505363 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.505351158 +0000 UTC m=+444.828736947 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505504 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505621 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.505610187 +0000 UTC m=+444.828995956 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.505764 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.506368 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.506353102 +0000 UTC m=+444.829739011 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.506569 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.508195 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.508181671 +0000 UTC m=+444.831567480 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.508354 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.508487 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.508472171 +0000 UTC m=+444.831857960 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.508627 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.508745 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.508734279 +0000 UTC m=+444.832120118 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.509139 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.516277 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.516255837 +0000 UTC m=+444.839641626 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.509330 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.516566 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.516555986 +0000 UTC m=+444.839941775 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.509370 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.516763 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.516754153 +0000 UTC m=+444.840140012 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.509400 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.519611 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.519569125 +0000 UTC m=+444.842954914 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510284 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.520090 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.520077132 +0000 UTC m=+444.843463131 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510462 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.520328 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.52031677 +0000 UTC m=+444.843702549 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510506 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510532 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510572 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510601 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510625 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510670 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510700 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510727 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510763 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510806 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510919 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510962 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.510997 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.511107 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.511148 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.511416 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.511455 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.511486 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.520719 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.520697562 +0000 UTC m=+444.844083471 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered
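The "not registered" errors above mean the kubelet's local object cache has no entry yet for the referenced Secret or ConfigMap, which is different from the object being absent from the cluster. A hedged client-go sketch that asks the API server directly for one of the secrets named above, to tell the two cases apart; it assumes a reachable kubeconfig at the standard default location and the usual k8s.io module dependencies.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config (assumption: a standard kubeconfig is present).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// One of the secrets the failing mounts in this log are waiting for.
	s, err := cs.CoreV1().Secrets("openshift-service-ca").Get(
		context.TODO(), "signing-key", metav1.GetOptions{})
	if err != nil {
		fmt.Printf("API server lookup failed too: %v\n", err)
		return
	}
	fmt.Printf("secret exists in the cluster: %s/%s\n", s.Namespace, s.Name)
}

If the object exists in the cluster while the kubelet still logs "not registered", the gap is in the kubelet's informer cache sync rather than in the cluster state.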
be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.527315 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.52730269 +0000 UTC m=+444.850688439 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.527445 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.527429664 +0000 UTC m=+444.850815423 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.527544 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.527533347 +0000 UTC m=+444.850919096 (durationBeforeRetry 8s). 
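The volume failures above share one shape: the lookup fails with object "<namespace>"/"<name>" not registered. Kubelet's secret and configmap managers only serve objects for pods currently registered with them, so this error is typically seen in the window after a kubelet restart (the m=+444 monotonic offsets place process start near 13:20:30, about seven minutes before these entries) and before the affected pods are re-registered; it says nothing about whether the object exists on the API server. The status patch failure is a separate problem: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 is refusing TCP connections. A quick probe of that endpoint, a hypothetical triage snippet rather than anything from kubelet itself, is sketched below in Go:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Probe the webhook endpoint the status patch is failing against
	// (address taken from the log line above).
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		fmt.Println("webhook endpoint unreachable:", err) // e.g. "connect: connection refused"
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is accepting TCP connections")
}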
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.527672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.527654051 +0000 UTC m=+444.851039900 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.528065 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.527995242 +0000 UTC m=+444.851381001 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.528259 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.5282478 +0000 UTC m=+444.851633559 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.528362 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.528351053 +0000 UTC m=+444.851736812 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.528472 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.528460778 +0000 UTC m=+444.851846527 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.528582 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.528570841 +0000 UTC m=+444.851956600 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.528684 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.528671105 +0000 UTC m=+444.852056864 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.532212 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.53219453 +0000 UTC m=+444.855580379 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.532349 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.532335004 +0000 UTC m=+444.855720793 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.532466 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.532449048 +0000 UTC m=+444.855834807 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.532577 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.532560911 +0000 UTC m=+444.855946660 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.532704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.533178 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.533205 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.533192113 +0000 UTC m=+444.856577862 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.533584 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.533571875 +0000 UTC m=+444.856957634 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.533697 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.534087 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.534190 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.534097 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.534125 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.534112003 +0000 UTC m=+444.857497752 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
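Every failed mount above is parked by nestedpendingoperations.go:348 with a retry deadline stamped exactly 8 s in the future. That durationBeforeRetry is consistent with the per-volume doubling backoff kubelet applies to failed operations, which would put each of these volumes at least five consecutive failures in. The schedule below is only a sketch; the initial delay, factor, and cap are assumed from upstream kubelet defaults, not values read from this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling backoff per failing volume operation; 8s is the fifth
	// step of this schedule (0.5s, 1s, 2s, 4s, 8s, ...).
	delay := 500 * time.Millisecond // assumed initial delay
	maxDelay := 2 * time.Minute     // assumed cap
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("failure %d -> durationBeforeRetry %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}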
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537368 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537411 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537445 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537513 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537591 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537650 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537705 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537866 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537906 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537934 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.537988 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538105 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538137 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538168 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538199 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538231 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538272 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538302 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538334 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538363 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538392 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538421 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538464 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538499 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538531 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538569 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538609 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538768 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538797 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538913 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.538974 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.539000 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
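The run of operationExecutor.MountVolume started lines from reconciler_common.go:231 is kubelet's volume reconciler re-initiating every pending mount on a sync pass; each attempt then fails at the object-cache lookup, producing the paired secret.go:194/configmap.go:199 errors seen throughout this window. Since "not registered" is a kubelet-local condition, a direct GET against the API server settles whether a referenced object actually exists. A minimal client-go sketch, with an illustrative kubeconfig path and one of the secrets named above:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Ask the API server directly whether one of the "not registered"
	// objects exists; the kubeconfig path here is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	_, err = cs.CoreV1().Secrets("openshift-etcd-operator").
		Get(context.TODO(), "etcd-operator-serving-cert", metav1.GetOptions{})
	// A nil error means the object exists server-side, so the failure
	// is kubelet-local registration rather than a missing secret.
	fmt.Println("GET secret error:", err)
}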
\"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539248 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539280 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539272092 +0000 UTC m=+444.862657861 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539317 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539348 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539340234 +0000 UTC m=+444.862726003 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539366 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539361245 +0000 UTC m=+444.862746994 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539379 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539373335 +0000 UTC m=+444.862759094 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539415 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539449 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539440817 +0000 UTC m=+444.862826586 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539681 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539715 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539703607 +0000 UTC m=+444.863089386 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539755 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.539789 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.539781679 +0000 UTC m=+444.863167458 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542337 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542381 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:54.542369543 +0000 UTC m=+444.865755333 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542424 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542450 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.542443326 +0000 UTC m=+444.865829095 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542496 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542533 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.542523419 +0000 UTC m=+444.865909198 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.542577 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.544718 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.544706901 +0000 UTC m=+444.868092680 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.544776 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.544806 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.544798984 +0000 UTC m=+444.868184843 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545279 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545323 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.545313397 +0000 UTC m=+444.868699176 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545365 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545389 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.545383679 +0000 UTC m=+444.868769458 (durationBeforeRetry 8s). 
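Note the pairing: each secret.go:194 or configmap.go:199 fetch error is followed by a nestedpendingoperations.go:348 backoff record for the same volume, so the number of distinct missing objects is far smaller than the raw line count suggests. A throwaway aggregator along these lines (purely illustrative, fed the log on stdin) reduces output like this to that distinct set:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Collapse repeated kubelet errors into the distinct set of
	// namespace/name objects reported as "not registered".
	re := regexp.MustCompile(`object "([^"]+)"/"([^"]+)" not registered`)
	seen := map[string]struct{}{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 64*1024), 4*1024*1024) // kubelet lines can be very long
	for sc.Scan() {
		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
			seen[m[1]+"/"+m[2]] = struct{}{}
		}
	}
	for obj := range seen {
		fmt.Println(obj)
	}
}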
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545431 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545458 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.545451301 +0000 UTC m=+444.868837071 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545529 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545547 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545558 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545590 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.545577156 +0000 UTC m=+444.868962935 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545635 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545663 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.545652478 +0000 UTC m=+444.869038257 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545790 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545805 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545967 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.545995 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.545987579 +0000 UTC m=+444.869373468 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.546140 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.546154 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.546162 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555034 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555056 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555072 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555135 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555190 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555240 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555300 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555353 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555421 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555433 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555451 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555508 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555582 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555650 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555713 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555763 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered
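Each kube-api-access-* failure above lists two objects, kube-root-ca.crt and openshift-service-ca.crt, because a pod's service-account token volume is a projected volume and all of its sources must resolve before kubelet can set it up; one unregistered configmap fails the whole volume. The sketch below gives the approximate shape of such a volume using the k8s.io/api/core/v1 types, with the path and expiry chosen for illustration rather than read from these pods:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Approximate composition of a kube-api-access-* volume; every
	// source listed must resolve before the volume mounts.
	expiry := int64(3607) // illustrative token lifetime
	vol := corev1.Volume{
		Name: "kube-api-access-l8bxr",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiry,
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
					}},
				},
			},
		},
	}
	fmt.Println("projected volume:", vol.Name, "sources:", len(vol.VolumeSource.Projected.Sources))
}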
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555885 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.555940 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556084 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556160 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556515 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.546180985 +0000 UTC m=+444.869566754 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556547 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556530202 +0000 UTC m=+444.879915961 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556569 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556561023 +0000 UTC m=+444.879946772 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556587 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556581014 +0000 UTC m=+444.879966763 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556601 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556594114 +0000 UTC m=+444.879979863 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556619 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556611645 +0000 UTC m=+444.879997394 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556640 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556628475 +0000 UTC m=+444.880014235 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556664 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556652766 +0000 UTC m=+444.880038515 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556683 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556675977 +0000 UTC m=+444.880061795 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556697 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556692268 +0000 UTC m=+444.880078017 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556710 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556704718 +0000 UTC m=+444.880090467 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556729 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556721178 +0000 UTC m=+444.880106927 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556743 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556737539 +0000 UTC m=+444.880123288 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556757 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556752289 +0000 UTC m=+444.880138048 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556771 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.55676591 +0000 UTC m=+444.880151659 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556785 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.55677931 +0000 UTC m=+444.880165059 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.556798 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.556792301 +0000 UTC m=+444.880178170 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.590328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641144 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641408 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641547 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: 
\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641622 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641652 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641730 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641762 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641793 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641879 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641907 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.641938 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642097 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642139 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642211 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642242 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642266 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642301 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642330 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: 
\"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642399 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642426 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642523 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642625 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642695 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642733 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642766 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642791 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: 
\"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642909 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642942 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.642984 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643073 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643110 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643153 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643178 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643203 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: 
\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643237 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643291 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.643462 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.643936 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644091 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644075666 +0000 UTC m=+444.967461565 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644244 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644299 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644313 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644351 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644336775 +0000 UTC m=+444.967722664 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644391 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644414 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644408217 +0000 UTC m=+444.967793986 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644459 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644482 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644476539 +0000 UTC m=+444.967862308 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644525 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644533 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644554 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644548171 +0000 UTC m=+444.967933950 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644594 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644606 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644616 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644638 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644631954 +0000 UTC m=+444.968017853 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644693 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644721 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644714407 +0000 UTC m=+444.968100296 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644753 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644789 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644779669 +0000 UTC m=+444.968165438 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644886 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644920 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.644912413 +0000 UTC m=+444.968298192 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644983 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.644996 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645096 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645134 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.64512598 +0000 UTC m=+444.968511749 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645178 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645203 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645197032 +0000 UTC m=+444.968582801 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645231 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645262 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645253185 +0000 UTC m=+444.968639064 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645299 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645321 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645314987 +0000 UTC m=+444.968700876 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645371 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645385 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645418 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.64540642 +0000 UTC m=+444.968792419 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645470 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645489 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645497 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645527 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645519264 +0000 UTC m=+444.968905043 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645568 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645602 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645594736 +0000 UTC m=+444.968980615 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645658 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645673 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645681 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645709 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645698139 +0000 UTC m=+444.969084028 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645766 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645777 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645806 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645798343 +0000 UTC m=+444.969184122 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645916 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645932 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645939 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.645971 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.645962938 +0000 UTC m=+444.969348837 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646059 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646076 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646089 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646122 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646107333 +0000 UTC m=+444.969493102 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646176 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646197 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646206 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646236 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646227186 +0000 UTC m=+444.969612955 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646281 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646315 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646303829 +0000 UTC m=+444.969689608 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646370 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646383 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646391 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646414 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646407392 +0000 UTC m=+444.969793161 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646456 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646493 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646481745 +0000 UTC m=+444.969867514 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646548 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646563 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646572 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646604 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646596818 +0000 UTC m=+444.969982587 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646661 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646677 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646685 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646716 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646704223 +0000 UTC m=+444.970089992 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646772 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646784 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646854 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.646801766 +0000 UTC m=+444.970187535 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646902 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646939 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.64692511 +0000 UTC m=+444.970310889 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.646993 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647083 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647094 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647126 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647117186 +0000 UTC m=+444.970502965 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647174 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647185 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647192 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647223 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647215479 +0000 UTC m=+444.970601258 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647266 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647295 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647288601 +0000 UTC m=+444.970674370 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647331 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647363 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647355154 +0000 UTC m=+444.970741163 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647398 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647432 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647423706 +0000 UTC m=+444.970809475 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647635 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647692 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647676804 +0000 UTC m=+444.971062583 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647742 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647768 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647761767 +0000 UTC m=+444.971147536 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647859 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647896 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647887801 +0000 UTC m=+444.971273570 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647939 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.647972 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.647964583 +0000 UTC m=+444.971350472 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.648064 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.648102 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.648092298 +0000 UTC m=+444.971478077 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.648343 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.648332366 +0000 UTC m=+444.971718215 (durationBeforeRetry 8s). 
Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.743970 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.743970 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745214 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745493 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745586 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.745598 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745638 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.745665 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.745647157 +0000 UTC m=+445.069033176 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.745789 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745896 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745947 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.745983 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.746058 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746073 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746101 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746121 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.746132 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746133 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746184 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746159939 +0000 UTC m=+445.069545898 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746234 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746260 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746273 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.746276 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746305 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746330 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746359 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746307 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746296199 +0000 UTC m=+445.069681978 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746387 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746411 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746427 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746443 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746463 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746474 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746492 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.74647966 +0000 UTC m=+445.069865629 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746531 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746545 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746561 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746571 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746558377 +0000 UTC m=+445.069944336 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746575 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.746580 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746602 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.74658516 +0000 UTC m=+445.069971009 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.746645 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746673 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746681 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746707 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746718 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746725 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746716595 +0000 UTC m=+445.070102374 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.746729 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746745 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:54.746737273 +0000 UTC m=+445.070123142 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746762 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746755083 +0000 UTC m=+445.070140842 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746784 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746777174 +0000 UTC m=+445.070162933 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746798 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746792365 +0000 UTC m=+445.070178114 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.746889 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.746878607 +0000 UTC m=+445.070264566 (durationBeforeRetry 8s). 
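
Each failed operation is requeued by nestedpendingoperations with an explicit deadline; durationBeforeRetry has already grown to 8s for every entry here, and the m=+445.07 suffix is Go's monotonic-clock reading, i.e. seconds since the kubelet process started. A small sketch (same kubelet.log assumption) that extracts the retry schedule, useful for confirming the backoff is uniform across volumes rather than compounding per volume:

```python
"""Extract the retry schedule from nestedpendingoperations entries."""
import re

RETRY = re.compile(
    r"No retries permitted until (\S+ \S+) \+0000 UTC "
    r"m=\+([0-9.]+) \(durationBeforeRetry ([0-9a-z]+)\)"
)

with open("kubelet.log", encoding="utf-8", errors="replace") as f:
    for line in f:
        for wall, mono, backoff in RETRY.findall(line):
            # wall = wall-clock deadline, mono = kubelet uptime in seconds.
            print(f"retry at {wall} (uptime {float(mono):.1f}s, backoff {backoff})")
```
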
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747087 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.747089 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.747163 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.747142452 +0000 UTC m=+445.070528481 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.747299 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.747323 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.747334 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.747365 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.747356323 +0000 UTC m=+445.070742222 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747484 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747541 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747596 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747629 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747663 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747756 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747803 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747881 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") 
pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747918 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747967 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.747992 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.748081 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.748124 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.748173 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.748333 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.748573 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.748721 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.748737 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.748750 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.748779 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.748770729 +0000 UTC m=+445.072156618 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749123 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749141 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749150 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749178 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749169763 +0000 UTC m=+445.072555787 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749232 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749252 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749261 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749286 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749279596 +0000 UTC m=+445.072665485 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749336 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749350 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749359 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749391 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.74938332 +0000 UTC m=+445.072769199 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749441 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749459 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749466 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749492 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749485353 +0000 UTC m=+445.072871242 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749534 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749545 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749559 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749590 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749581836 +0000 UTC m=+445.072967725 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749625 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749650 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749644308 +0000 UTC m=+445.073030197 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749693 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749729 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749719081 +0000 UTC m=+445.073104970 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749770 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749799 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.749792013 +0000 UTC m=+445.073177902 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.749986 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750046 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. 
No retries permitted until 2026-03-12 13:27:54.750037091 +0000 UTC m=+445.073422860 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750105 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750119 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750128 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750158 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.750145284 +0000 UTC m=+445.073531063 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750196 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750219 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.750213056 +0000 UTC m=+445.073598826 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750263 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750273 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750304 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.750292309 +0000 UTC m=+445.073678088 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750356 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750372 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750380 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750405 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.750398312 +0000 UTC m=+445.073784201 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750440 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750471 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.750458715 +0000 UTC m=+445.073844484 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750524 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.750554 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.750547168 +0000 UTC m=+445.073932947 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.784101 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.808710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.838164 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.850159 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.850336 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850618 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850680 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850702 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850764 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850779 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.850753828 +0000 UTC m=+445.174142177 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850785 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.850637 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850949 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850966 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.850983 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.851095 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.851079839 +0000 UTC m=+445.174465748 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.851117 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: E0312 13:27:46.851228 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:27:54.851218644 +0000 UTC m=+445.174604543 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.882665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.886642 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.886743 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.916159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:46 crc kubenswrapper[4125]: I0312 13:27:46.947563 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.008627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.026621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.026735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:47 crc kubenswrapper[4125]: E0312 13:27:47.027142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.026690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.027208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.027241 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:47 crc kubenswrapper[4125]: E0312 13:27:47.027334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:47 crc kubenswrapper[4125]: E0312 13:27:47.027475 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:47 crc kubenswrapper[4125]: E0312 13:27:47.027562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:47 crc kubenswrapper[4125]: E0312 13:27:47.027692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.033692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.064865 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:47 crc kubenswrapper[4125]: E0312 13:27:47.179547 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.351166 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f
79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.420421 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.453864 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.492232 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.524170 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.745987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.779470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.834554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.861073 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840"} Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.891317 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.891396 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.910602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:47 crc kubenswrapper[4125]: I0312 13:27:47.968467 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.010176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.027543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.027716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.027767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.027911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.027963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.028071 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.028115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.028192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.028340 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.028455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.028507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.028596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.028644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.028738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.028784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.028940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.028990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.029098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.029148 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.029411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.029549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.029636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.029676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.029750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.029801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.034952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.035110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033619 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.036307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.036527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.036641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.036917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033786 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.037155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033890 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.037259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033897 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.037366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.037964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.033975 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.038178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.038285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034089 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.038394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.038726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.038756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034216 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.034357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.035211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.035394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.035429 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.037463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.037720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.038982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.039891 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.040056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.040181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.040280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.081248 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.132527 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.194288 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.279391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.349327 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.389670 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.389743 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.389764 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.390075 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.390155 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:48Z","lastTransitionTime":"2026-03-12T13:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.414724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.459391 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.473268 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.475974 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.476104 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.476122 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.476142 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.476168 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:48Z","lastTransitionTime":"2026-03-12T13:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.597634 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.608389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.620242 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.620397 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.620504 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.620616 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.620742 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:48Z","lastTransitionTime":"2026-03-12T13:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.662187 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.664689 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.670763 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.670914 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.670929 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.670947 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.670978 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:48Z","lastTransitionTime":"2026-03-12T13:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.711455 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.752706 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: E0312 13:27:48.752734 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.817270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ce15d141220317b4e57b1599c379e880d26b45054aa1776fbad6346dd58a55d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce15d141220317b4e57b1599c379e880d26b45054aa1776fbad6346dd58a55d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d4e207328f4e3140d751e6046a1a8d14a7f392d2f10d6248f7db828278d0972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d4e207328f4e3140d751e6046a1a8d14a7f392d2f10d6248f7db828278d0972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:10Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:11Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}],\\\"phase\\\":\\\"Pending\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.842105 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.873235 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea"} Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.879480 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f"} Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.883075 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.891393 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.891960 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.941169 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:48 crc kubenswrapper[4125]: I0312 13:27:48.991303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.025149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:49 crc kubenswrapper[4125]: E0312 13:27:49.025325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.025388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:49 crc kubenswrapper[4125]: E0312 13:27:49.025470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.025519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:49 crc kubenswrapper[4125]: E0312 13:27:49.025596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.025637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:49 crc kubenswrapper[4125]: E0312 13:27:49.025732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.025790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:49 crc kubenswrapper[4125]: E0312 13:27:49.025972 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.031797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastSta
te\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.079747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.161953 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.203368 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.229785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.271131 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.303763 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.337459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.381143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.431319 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.472713 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.527159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.562450 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.596438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.637233 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.679539 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.716560 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.769414 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.827485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d4e207328f4e3140d751e6046a1a8d14a7f392d2f10d6248f7db828278d0972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d4e207328f4e3140d751e6046a1a8d14a7f392d2f10d6248f7db828278d0972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:10Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:11Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\
\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}],\\\"phase\\\":\\\"Pending\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.868227 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.894593 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57"} Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.900590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.901684 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.901849 4125 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:49 crc kubenswrapper[4125]: I0312 13:27:49.968359 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.009116 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.024767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025556 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025865 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.025952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.026112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026371 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.026470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.026648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.026764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.025959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.027235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.027419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.027515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.027653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.027797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027862 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.027978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028066 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028113 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.028803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.028907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029393 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029889 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.029945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.029987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.030352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.030529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.030667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.030783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.030913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.030996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.031170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.031323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.031431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.031496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.031623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:50 crc kubenswrapper[4125]: E0312 13:27:50.031928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.057692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.115215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.191171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.239439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.297325 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.346168 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.375706 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.432544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.477186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.561272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.605541 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.651615 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.692218 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.727519 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.768697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.833528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.872720 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.885603 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:50 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:50 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.885681 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.911226 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b"} Mar 12 13:27:50 crc kubenswrapper[4125]: I0312 13:27:50.944786 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.000334 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.025351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.025406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.025475 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:51 crc kubenswrapper[4125]: E0312 13:27:51.025543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.025601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.025620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:51 crc kubenswrapper[4125]: E0312 13:27:51.025760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:51 crc kubenswrapper[4125]: E0312 13:27:51.026240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:51 crc kubenswrapper[4125]: E0312 13:27:51.026367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:51 crc kubenswrapper[4125]: E0312 13:27:51.026615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.038468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.089905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.132548 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.162269 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.198233 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.226687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.263460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.319787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.356393 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.391233 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.437198 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.469723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.526686 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.600766 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.633333 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.669489 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.703557 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.727699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.765590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.803501 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.839998 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.892986 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.896385 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.896508 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.933997 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.949658 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf"} Mar 12 13:27:51 crc kubenswrapper[4125]: I0312 13:27:51.970560 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.007526 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.028718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.029365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.029565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.029609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.029800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.029952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.030120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.030307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030306 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.030445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.030675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030869 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.030753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.031002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.031197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.031302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.031417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.031538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.031649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.031717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.031800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.031949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.031959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.032098 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.032128 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.032228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.032266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.032366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.032389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.032517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.032629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.032680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.032792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.033107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.033140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.033195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.033769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.033901 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.033967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.034152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.034328 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034396 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.034983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.035223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.035306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.035337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.036182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.036280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.036558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.036650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.036787 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.038378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.067984 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.112401 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.152855 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: E0312 13:27:52.180780 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.212680 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.295427 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.356169 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.434926 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:52 crc kubenswrapper[4125]: I0312 13:27:52.466735 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.527142 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.533185 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.533459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.533761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.533934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.534106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.534220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.536394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.536539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.537374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.537496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.538231 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:53 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:53 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.538366 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.564233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.564364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.564472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564511 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.564589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.564696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.564884 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.564924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565105 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.565752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.565792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.575705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566452 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.576937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566470 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.577129 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.577261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.577694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566793 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566898 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.566935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.578771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.578938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579012 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.579982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.581247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.581778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.581913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.581985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.582366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.582456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.584159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.584234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:53 crc kubenswrapper[4125]: E0312 13:27:53.586320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.697102 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has 
prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.748588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.787306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.821654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.864206 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d4e207328f4e3140d751e6046a1a8d14a7f392d2f10d6248f7db828278d0972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d4e207328f4e3140d751e6046a1a8d14a7f392d2f10d6248f7db828278d0972\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:10Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:11Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\
\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}],\\\"phase\\\":\\\"Pending\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.889779 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:53 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:53 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.889956 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.907626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:53 crc kubenswrapper[4125]: I0312 13:27:53.969133 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.013318 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.052153 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.087223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.115691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.140286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.177641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.209558 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.254188 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.302250 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.343590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.376623 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.379283 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.380336 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.380426 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.380408459 +0000 UTC m=+460.703794348 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.434765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.469635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.511646 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.535999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.584995 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585176 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585215 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585240 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585294 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585330 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585391 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585465 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585514 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585579 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585615 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585671 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585746 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585776 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585931 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.585991 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586026 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586179 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: 
\"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586204 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586265 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586347 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586382 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586427 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586538 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586582 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586609 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586650 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586674 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586721 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586756 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586782 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586894 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586930 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586957 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.586987 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587017 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587128 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587161 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587203 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587233 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587275 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587316 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587423 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587474 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587505 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587561 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587593 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587631 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587693 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587727 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587754 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587776 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587861 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587906 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.587945 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588019 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588095 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588139 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588170 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: 
\"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588211 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588243 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588274 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588303 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.588332 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588456 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588508 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.588494865 +0000 UTC m=+460.911880644 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588573 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588605 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.588597569 +0000 UTC m=+460.911983348 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588652 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588681 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.588673971 +0000 UTC m=+460.912060020 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588727 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588754 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.588747145 +0000 UTC m=+460.912133294 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588797 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588918 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588953 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.588944031 +0000 UTC m=+460.912329930 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.588993 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589017 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589010633 +0000 UTC m=+460.912396402 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589089 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589121 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589112916 +0000 UTC m=+460.912498685 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589156 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589179 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589173058 +0000 UTC m=+460.912558957 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589183 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589226 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589249 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.58924349 +0000 UTC m=+460.912629259 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589263 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589273 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589317 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589336 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589337 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589369 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589377 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589383 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589394 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589437 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589445 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589482 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589495 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object 
"openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589507 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589524 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589545 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589557 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589569 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589573 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589579 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589589 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589594 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589596 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589623 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589631 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589635 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589344 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589663 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object 
"openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589556 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589286 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589689 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589695 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589394 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589730 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589735 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589749 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589294 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589269042 +0000 UTC m=+460.912654931 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589750 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589778 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589767298 +0000 UTC m=+460.913153047 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589672 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589798 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589804 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589774 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589878 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589892 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589923 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589896 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589938 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589948 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589394 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589957 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589970 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589940 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589795 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589787929 +0000 UTC m=+460.913173778 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589951 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589927 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590005 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590015 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590025 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589993 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590079 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590005 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.589993184 +0000 UTC m=+460.913379293 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590108 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.589496 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590116 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590106429 +0000 UTC m=+460.913492168 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590125 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590138 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590139 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.59013155 +0000 UTC m=+460.913517449 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590150 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590136 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590162 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590155921 +0000 UTC m=+460.913542310 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590184 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590176361 +0000 UTC m=+460.913562240 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590117 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590208 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590194452 +0000 UTC m=+460.913580211 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590240 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590219393 +0000 UTC m=+460.913605292 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590267 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590255924 +0000 UTC m=+460.913641683 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590289 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590280755 +0000 UTC m=+460.913666604 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590408 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590399479 +0000 UTC m=+460.913785228 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590431 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590418179 +0000 UTC m=+460.913804018 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590449 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.59044198 +0000 UTC m=+460.913827729 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590465 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.59045751 +0000 UTC m=+460.913843259 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590479 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590472681 +0000 UTC m=+460.913858440 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590559 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590549573 +0000 UTC m=+460.913935332 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590583 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590574244 +0000 UTC m=+460.913960003 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590600 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590593435 +0000 UTC m=+460.913979274 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590616 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590609635 +0000 UTC m=+460.913995384 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590638 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590626216 +0000 UTC m=+460.914011965 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590657 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590650657 +0000 UTC m=+460.914036406 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590673 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590665297 +0000 UTC m=+460.914051046 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590688 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590680928 +0000 UTC m=+460.914066777 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590705 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590695438 +0000 UTC m=+460.914081197 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590727 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590716959 +0000 UTC m=+460.914102848 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590743 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590735969 +0000 UTC m=+460.914121728 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590758 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.59075097 +0000 UTC m=+460.914137129 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590777 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.5907655 +0000 UTC m=+460.914151249 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590795 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590788851 +0000 UTC m=+460.914174340 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.590870 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.590859293 +0000 UTC m=+460.914245242 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591087 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.59107414 +0000 UTC m=+460.914459899 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591108 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591100161 +0000 UTC m=+460.914485910 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591124 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591117302 +0000 UTC m=+460.914503061 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591146 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591133192 +0000 UTC m=+460.914519031 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591165 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591158273 +0000 UTC m=+460.914544032 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591181 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591175173 +0000 UTC m=+460.914560932 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591195 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591188734 +0000 UTC m=+460.914574483 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591209 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591202444 +0000 UTC m=+460.914588203 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591231 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591223065 +0000 UTC m=+460.914608824 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591248 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591241456 +0000 UTC m=+460.914627215 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591263 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591256826 +0000 UTC m=+460.914642585 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591278 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591271227 +0000 UTC m=+460.914656976 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591291 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591284687 +0000 UTC m=+460.914670446 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591335 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591328308 +0000 UTC m=+460.914714067 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591356 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.59134322 +0000 UTC m=+460.914728979 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591375 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591367901 +0000 UTC m=+460.914753750 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591392 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591383601 +0000 UTC m=+460.914769450 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591408 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591400222 +0000 UTC m=+460.914786071 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591422 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591415812 +0000 UTC m=+460.914801651 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591437 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591430403 +0000 UTC m=+460.914816162 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591458 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591446533 +0000 UTC m=+460.914832292 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591484 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591470924 +0000 UTC m=+460.914856683 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591501 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591494865 +0000 UTC m=+460.914880624 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591522 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591514555 +0000 UTC m=+460.914900314 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.591540 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.591532846 +0000 UTC m=+460.914918595 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.594588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.627423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.668745 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689451 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689549 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689599 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689628 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689668 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689731 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689760 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689794 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689918 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.689977 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.689998 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690087 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690188 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690209 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690217 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690254 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690231208 +0000 UTC m=+461.013617087 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690283 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.6902712 +0000 UTC m=+461.013657059 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690320 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.69029742 +0000 UTC m=+461.013683299 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690323 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690348 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690152 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690362 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690367 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690375 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690395 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690399 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690389824 +0000 UTC m=+461.013775843 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690407 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690419 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690412625 +0000 UTC m=+461.013798374 (durationBeforeRetry 16s).
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.690111 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690427 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690449 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690429126 +0000 UTC m=+461.013814975 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690469 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690460647 +0000 UTC m=+461.013846496 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690353 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690488 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690507 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690522 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690524 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690515509 +0000 UTC m=+461.013901368 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690558 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690530 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690586 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690579671 +0000 UTC m=+461.013965540 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690585 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690602 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690612 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690613 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690605971 +0000 UTC m=+461.013991830 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690643 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690634912 +0000 UTC m=+461.014020771 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.690647 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690690 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690701 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690708 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690730 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690751 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.690755 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690761 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690797 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690787537 +0000 UTC m=+461.014173326 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690916 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690935 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690955 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690941862 +0000 UTC m=+461.014327611 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.690935 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.690976 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.690968623 +0000 UTC m=+461.014354392 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691011 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691082 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691097 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691145 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691167 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691176 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691168169 +0000 UTC m=+461.014554028 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691204 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691205 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691226 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691100 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691235 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691250 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691255 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691275 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691263983 +0000 UTC m=+461.014649842 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691299 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691307 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691325 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691318024 +0000 UTC m=+461.014703803 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691330 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691352 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691363 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691367 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691358886 +0000 UTC m=+461.014744745 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691411 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691413 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691427 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691438 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691453 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691444918 +0000 UTC m=+461.014830787 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691470 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691463309 +0000 UTC m=+461.014849148 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691437 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691482 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691490 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.6914832 +0000 UTC m=+461.014868959 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691505 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.69149863 +0000 UTC m=+461.014884479 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691554 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691586 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691633 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691731 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.691966 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.691994 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692001 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.691992347 +0000 UTC m=+461.015378126 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692020 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.692014258 +0000 UTC m=+461.015400117 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692080 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692115 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.692107241 +0000 UTC m=+461.015493120 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692143 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692178 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.692166643 +0000 UTC m=+461.015552512 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692357 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.692349238 +0000 UTC m=+461.015735017 (durationBeforeRetry 16s). 
Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692385 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.692412 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.6924057 +0000 UTC m=+461.015791569 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.692549 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.692709 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.692880 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.692963 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.692992 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693063 4125 
projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.693084 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693088 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693104 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.693117 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693143 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693127713 +0000 UTC m=+461.016513592 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693169 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.693178 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693206 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693196797 +0000 UTC m=+461.016582586 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.693232 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693235 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.693271 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.693304 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693319 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693333 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693341 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693367 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693360022 +0000 UTC m=+461.016745801 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693371 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693405 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693395563 +0000 UTC m=+461.016781592 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693410 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693441 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693434444 +0000 UTC m=+461.016820223 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693449 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693475 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693467715 +0000 UTC m=+461.016853584 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693483 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693515 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:10.693507687 +0000 UTC m=+461.016893466 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693531 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693545 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693553 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693567 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693577 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693579 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693572019 +0000 UTC m=+461.016957798 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693618 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693626 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.69361753 +0000 UTC m=+461.017003279 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693641 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693634891 +0000 UTC m=+461.017020730 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.693656 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.693649881 +0000 UTC m=+461.017035640 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.727423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.763756 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.794242 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.794343 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794367 4125 
secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794433 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.794416813 +0000 UTC m=+461.117802782 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.794377 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.794491 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794561 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794628 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794645 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794657 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794695 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.794683581 +0000 UTC m=+461.118069360 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794710 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794717 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794779 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794796 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794758 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.794746368 +0000 UTC m=+461.118132357 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.794568 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794942 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.794918269 +0000 UTC m=+461.118304348 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794780 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.794983 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795074 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795114 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.79510264 +0000 UTC m=+461.118488419 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795159 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795185 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795193 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795218 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795225 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.795216219 +0000 UTC m=+461.118602108 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795264 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795309 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795350 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.795340863 +0000 UTC m=+461.118726832 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795311 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795388 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795407 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795413 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795449 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795464 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795478 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795486 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795500 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795519 4125 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.795508864 +0000 UTC m=+461.118894893 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795564 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795570 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795584 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795595 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795627 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.795617732 +0000 UTC m=+461.119003611 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795659 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795674 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795683 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795714 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795417 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795769 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795806 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.795663 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795717 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.79570385 +0000 UTC m=+461.119089739 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795733 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796181 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796167 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796147449 +0000 UTC m=+461.119533308 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796208 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796200771 +0000 UTC m=+461.119586530 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.795870 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796222 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796237 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796258 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796246598 +0000 UTC m=+461.119632377 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796319 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796320 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796354 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796362 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796379 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796341 4125 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796332985 +0000 UTC m=+461.119718734 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796432 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796465 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796509 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796559 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796589 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796651 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796683 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") 
" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.796740 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796898 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796927 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796940 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796930676 +0000 UTC m=+461.120316555 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796970 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796953056 +0000 UTC m=+461.120338925 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.796989 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.796981877 +0000 UTC m=+461.120367636 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797009 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797024 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797105 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797104 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797174 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797214 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797265 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797285 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797295 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797299 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797310 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797011 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:10.797002798 +0000 UTC m=+461.120388547 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797343 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.797330573 +0000 UTC m=+461.120716652 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797363 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.797355284 +0000 UTC m=+461.120741033 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797384 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.797376095 +0000 UTC m=+461.120761844 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.797405 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.797393635 +0000 UTC m=+461.120779539 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.797503 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.797733 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.798256 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.798401 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.798467 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.798499 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798632 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798667 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.798659297 +0000 UTC m=+461.122045076 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798695 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.798681488 +0000 UTC m=+461.122067367 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798719 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.798707829 +0000 UTC m=+461.122093578 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798757 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798781 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.798775071 +0000 UTC m=+461.122160850 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798892 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798908 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798917 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798942 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.798935286 +0000 UTC m=+461.122321055 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.798986 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799013 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.799007118 +0000 UTC m=+461.122392887 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799201 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799217 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799231 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799258 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.799250736 +0000 UTC m=+461.122636515 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799307 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799318 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799326 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.799348 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.799341729 +0000 UTC m=+461.122727498 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.828421 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.879938 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.891472 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.891615 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.900336 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.900478 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.900735 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901325 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901357 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901374 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object 
"openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901395 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901438 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901451 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901464 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901477 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901486 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901543 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.901522124 +0000 UTC m=+461.224907993 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901569 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:10.901561105 +0000 UTC m=+461.224946864 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: E0312 13:27:54.901585 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:10.901577196 +0000 UTC m=+461.224962945 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.918235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.945211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:54 crc kubenswrapper[4125]: I0312 13:27:54.983577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.025554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.025909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.025692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.026192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.026348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.026521 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.026310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.025805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.026958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.025727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.026879 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.026930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.026144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.028063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028247 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.030257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.028367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.028451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.030689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.031086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.031417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.033308 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028671 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.045267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.045391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.045487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.045593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.045980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028762 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.046181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.046512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.047406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.029141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029170 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.029231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.029327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.029409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.050450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.051252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029496 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.029775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.030000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.028626 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.051948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.052309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.052324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.052422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.052487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.046283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.053228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.029518 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.056274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.056358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.056462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.056525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.058199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.058288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.058366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.058903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.058998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.059168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.061777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.061944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:55 crc kubenswrapper[4125]: E0312 13:27:55.062765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.071755 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.106510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.136345 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.230648 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.263526 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.285999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.316418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.344432 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.363512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.405608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.454263 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.506390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.544231 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.598425 4125 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f" exitCode=0 Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.598562 4125 
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f"} Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.615316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.653253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.707348 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.744184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.789361 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.834399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.881386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.886395 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:55 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.886697 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.935279 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m 
/bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller 
pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.966482 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:55 crc kubenswrapper[4125]: I0312 13:27:55.995538 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:55Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.025141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.025230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:27:56 crc kubenswrapper[4125]: E0312 13:27:56.025481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:27:56 crc kubenswrapper[4125]: E0312 13:27:56.025618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.038581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.079066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.102559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.150205 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.177144 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.240640 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.280743 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.505676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.537415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.556873 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.586387 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.611609 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a"}
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.619195 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c"}
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.631924 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.887612 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:27:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:27:56 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:27:56 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:27:56 crc kubenswrapper[4125]: I0312 13:27:56.888000 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.026455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025768 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025913 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.026354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.025803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.027347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.027510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.027612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.027939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028109 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.027989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028333 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028333 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.028463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.028705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.027520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.029613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.029882 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.030116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.030333 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.030528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.030716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.031605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.031902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.032333 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.032491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.032598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.032687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.032770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033862 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.033964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.034133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.034221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.034444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.034552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.034654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.035182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.035265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.035347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.035417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.035494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.036526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.036632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.037103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.037396 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.037494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.081533 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.114724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: E0312 13:27:57.182606 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.192302 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.251249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.280475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.304578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.347772 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.374023 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.398750 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.428189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.471402 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.526245 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.556801 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.591965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.673612 4125 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c" exitCode=0 Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.674282 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c"} Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.693651 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.744078 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.787753 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.815592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.839419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.861509 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.885469 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.886521 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.911204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.939256 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:57 crc kubenswrapper[4125]: I0312 13:27:57.979980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.003121 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.025939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:27:58 crc kubenswrapper[4125]: E0312 13:27:58.026263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.026579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:27:58 crc kubenswrapper[4125]: E0312 13:27:58.026749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.040537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.064219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.093561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.125022 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.152225 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:11Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}],\\\"phase\\\":\\\"Pending\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.180286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.225891 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.266597 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.292987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.332854 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.355606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.377259 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.404237 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.440781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.462470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.484403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.509963 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.549780 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.575211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.601985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.626940 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.656471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.690516 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.730758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.771782 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.821965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.852785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.879117 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.884709 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:27:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:27:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:27:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.884790 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.923135 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.923197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.923215 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.923238 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:58 crc kubenswrapper[4125]: I0312 13:27:58.923269 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:58Z","lastTransitionTime":"2026-03-12T13:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.025953 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.026177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.026408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.026550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026767 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026866 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026880 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026898 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.026778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.026779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.027274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.027385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.027478 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.027538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.027798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.028301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.028448 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.028551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.028572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.028644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.028803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.029065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.029065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.029142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.029156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.029169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.029306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.029354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.029463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.028231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.029762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.029925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.030068 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.030097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.030121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.030514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.030665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.030667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.031148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.031310 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031898 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.031990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.032934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.033014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.033125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.033196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.047974 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.053540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.057968 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.058126 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.058142 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.058161 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.058197 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:59Z","lastTransitionTime":"2026-03-12T13:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.089691 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.090110 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.094190 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.094270 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.094311 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.094338 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.094376 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:59Z","lastTransitionTime":"2026-03-12T13:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.123307 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.128013 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.134135 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.134207 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.134225 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.134248 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.134277 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:59Z","lastTransitionTime":"2026-03-12T13:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.154626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.155140 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet 
has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3a
d8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf3
6132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.163450 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.163596 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.163690 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.163781 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.163997 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:27:59Z","lastTransitionTime":"2026-03-12T13:27:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.185716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.192091 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: E0312 13:27:59.192203 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.206096 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.231426 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.260647 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.291076 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.355633 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.389234 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.412981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.438300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.463772 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.499906 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.527700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.550805 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.578977 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.607564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.627130 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.655435 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.677762 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.713905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.742977 4125 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70" exitCode=0
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.743156 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70"}
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.758535 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf"}
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.759354 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.759680 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.759701 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.767903 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.792578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.827504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.853990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.885888 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.889324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:27:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:27:59 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:27:59 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.889423 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.913352 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.935797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.961493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z"
Mar 12 13:27:59 crc kubenswrapper[4125]: I0312 13:27:59.990773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:27:59Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.026027 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:00 crc kubenswrapper[4125]: E0312 13:28:00.026350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.026446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:00 crc kubenswrapper[4125]: E0312 13:28:00.026534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.033042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.071784 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.135620 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.173107 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.209630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.259661 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.307438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.350504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.406347 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.463173 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.507737 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.547336 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.588734 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.627720 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:57Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://455c9dcaca7ee7118b89a599c97b6a458888800688dd381f8c5dcbd6ba96e17d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:11Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d0
ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.659195 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.698379 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.750671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.805485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.839654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\
\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.862299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.892258 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.892382 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.906005 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.944028 4125 status_manager.go:877] "Failed to 
update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.984914 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:00Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:00 crc kubenswrapper[4125]: I0312 13:28:00.989254 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.002323 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025545 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.025645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025891 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.025959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.025997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.026085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.026873 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.026937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.027099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.027184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.027221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.027274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.027432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.027490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.033696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.033913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034134 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.034386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034466 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.034591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.034755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.035383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.035743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.036850 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.037005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.037235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.037310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.037389 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.037577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.037654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.037763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.038950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.039103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.039240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.039522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.039744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.042594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.041268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.041310 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.041344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.041373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.041537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.041641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.041958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.042150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.042215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.046351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.047037 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.047195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.047973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.048162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.048320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.048390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.057586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.048706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.048797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.050568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.050644 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.062635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.050701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.062772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.050738 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.063091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.050764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.050787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.063654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.052558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.053265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.054695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.065439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.065567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.055733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.055780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.055944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.069130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.056179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:01 crc kubenswrapper[4125]: E0312 13:28:01.056286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.090422 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.124559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.167532 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.194222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.233625 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.273343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.318528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.343749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.374898 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.414394 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.463168 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.489951 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.513634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.539724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.600324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.630564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.661000 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.692534 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.718304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.749973 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.800546 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.837924 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.869900 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.885889 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.885958 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:01 crc kubenswrapper[4125]: I0312 13:28:01.902106 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.023290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.026712 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:02 crc kubenswrapper[4125]: E0312 13:28:02.027017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:02 crc kubenswrapper[4125]: E0312 13:28:02.026754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.125042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: E0312 13:28:02.185340 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.392217 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.452569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.683229 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.719986 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.764517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.800648 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.812930 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce"} Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.832683 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.863848 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.887241 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.888323 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.888419 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.912932 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.944785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.968408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:02 crc kubenswrapper[4125]: I0312 13:28:02.985173 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.003369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025147 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025774 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.026183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.025778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.026442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.026593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.026671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026885 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.026900 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.026906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.027016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027193 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.027188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.027323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.027569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027599 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027612 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.027462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.027744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.028130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.028304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.028437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.028316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.028773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028865 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.028969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.029006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.029018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.029644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.029909 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.029929 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.029690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.030476 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.030967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.031467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031862 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.031925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.032000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.032403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.032514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.032644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:03 crc kubenswrapper[4125]: E0312 13:28:03.034332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.052298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.079510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.105495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.131158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.173238 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.206626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.230645 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.262374 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.302967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.324183 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.346451 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.368484 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.397543 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.419305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.436040 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.464272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.499446 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.530919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.560036 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.598995 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.631023 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.654969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.671412 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.708636 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.730003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.754089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.797721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.823186 4125 status_manager.go:877] "Failed to update status for pod"
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.854322 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.885975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.888255 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.888335 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.921464 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.952333 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.972591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:03 crc kubenswrapper[4125]: I0312 13:28:03.994806 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.026270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:04 crc kubenswrapper[4125]: E0312 13:28:04.026569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.026776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:04 crc kubenswrapper[4125]: E0312 13:28:04.026961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.030981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.052786 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.082641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:57Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8d0
ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d0ea8f66b79c23a45ba2f75937377749519dc802fb755a7fce9c90efb994507\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.105705 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.139502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.171798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.195300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.225037 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\
\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.242343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.265266 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.297298 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.336178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.365621 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.392533 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.420962 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.443105 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.471723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.493249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.520616 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.555460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.582138 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.623531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.651466 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.672252 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.698491 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.728545 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.755377 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.779637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCoun
t\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.810744 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.838541 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.875032 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.887630 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.887875 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.901142 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.927736 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.960959 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:04 crc kubenswrapper[4125]: I0312 13:28:04.990082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.016098 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.025598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.026132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.026647 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.026873 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.027109 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.027249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.027395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.027545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.027657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.027794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.028006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.028205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.028309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.028476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.028543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.028985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.029183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.029338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.029524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.029661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.029753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.030044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.030303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.030491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030532 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.030675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.030967 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.031112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.031294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.031463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.031607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.031734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.031796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.032138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.032247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.032438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032539 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.032620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.032743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.032791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.033151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.033251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.033301 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.033363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.033620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.033703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.033755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.033770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.033947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.033983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.034157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.034287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.034954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.035312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.035485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.035612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.035708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.035872 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.035971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.036123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.036236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.036348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:05 crc kubenswrapper[4125]: E0312 13:28:05.036593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.052581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.073597 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.099655 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.133212 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.166415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerI
D\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.195105 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.218969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.258721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.280955 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.301297 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.321580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.349186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.374184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.399502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.424503 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.447790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.481119 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.515476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:02Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ba
b04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}],\\\"phase\\\":\\\"Pending\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.536164 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.568787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.602270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.623663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.644089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\
\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.667941 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.692701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.722526 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.750985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.776977 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.798456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.812539 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.834500 4125 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce" exitCode=0
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.834589 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce"}
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.835108 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.860474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.876663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.886669 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:05 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:05 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.886761 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.904920 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.921120 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.939146 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.957656 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.976956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:05 crc kubenswrapper[4125]: I0312 13:28:05.997356 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.016665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.026947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.027180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:06 crc kubenswrapper[4125]: E0312 13:28:06.027446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:06 crc kubenswrapper[4125]: E0312 13:28:06.027216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.032505 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.032965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: E0312 13:28:06.036878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.048711 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.068303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.086199 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.108021 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.131993 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.159335 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.190242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.223245 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.242952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.268148 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.286309 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.301591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.325210 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.344940 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.368191 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.391593 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.415217 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.435749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.455197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.481982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.497646 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.519321 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.543232 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.598628 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status:
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.641909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.669159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.698500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.736567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.780355 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.835895 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.864432 4125 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6" exitCode=0
Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.865052 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6"}
Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.873609 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.886324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.886773 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.897856 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.941695 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:06 crc kubenswrapper[4125]: I0312 13:28:06.982356 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.021050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.025573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.025767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.026294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.026421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.026712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.026681 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.027207 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.027379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.027538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.027616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.027720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.027982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.028030 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.028121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.028163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.028384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.028436 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.028473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.028706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.028785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029001 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.029216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.029359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.029531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029548 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.029702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.029993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.033314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.030214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.034034 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.034926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.030424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.030554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.035943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.030770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030802 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030898 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.030958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031046 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031394 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.037130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.037248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.031875 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.032189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.032245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.035467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.031369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036900 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.036986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.037641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.037703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.037800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.037970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.038111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.038926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.060203 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: E0312 13:28:07.187617 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.372891 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.424375 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1dba0ea54e565345301e3986d0dd8c643d32ea56c561c86bdb4d4b35fa49a453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2024-06-27T13:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:13Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.455724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.483901 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc 
kubenswrapper[4125]: I0312 13:28:07.516552 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.535960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.569296 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.592402 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.614437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.641368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.666174 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.695275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.727700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.754581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.775988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.803416 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.826351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.849528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.883233 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379"} Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.884911 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.890039 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.890667 4125 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.908589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.928523 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:07 crc kubenswrapper[4125]: I0312 13:28:07.954381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.005563 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.027665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:08 crc kubenswrapper[4125]: E0312 13:28:08.028264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.028524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.028701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: E0312 13:28:08.029005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.287739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.326554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.370783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.415979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.443173 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.462201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.476318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.495721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.518262 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.537330 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.557574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.582620 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.610897 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.631225 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.652378 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.673231 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.702475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.742020 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.778352 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.868723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.891944 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.892125 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.900600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.900600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.937246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:08 crc kubenswrapper[4125]: I0312 13:28:08.976972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:08Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.021990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.025542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.025721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.025798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.025725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.025958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.025983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.026025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.026434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.026702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026841 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026885 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026890 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026917 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.026996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027116 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027136 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027280 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.027488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.026550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.027605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.027890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.028234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.028291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.028382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.028660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.028752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.029168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.029254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.029526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.029603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.029669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.029711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030035 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.030979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.031200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.031363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.031767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.031963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.032977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.033987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.034209 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.034354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.073167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.113767 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.156698 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.195612 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.220602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.225781 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" probeResult="failure" output="" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.240730 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.240878 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.240899 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.240918 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.240951 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:09Z","lastTransitionTime":"2026-03-12T13:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.255315 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.259049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.260241 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.260352 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.260436 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.260521 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.260613 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:09Z","lastTransitionTime":"2026-03-12T13:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.273492 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.278495 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.278663 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.278781 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.278987 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.279198 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:09Z","lastTransitionTime":"2026-03-12T13:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.292918 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.301546 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.301616 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.301636 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.301661 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.301689 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:09Z","lastTransitionTime":"2026-03-12T13:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.303014 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.322949 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\
"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeB
ytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.328043 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.328148 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.328166 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.328189 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.328218 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:09Z","lastTransitionTime":"2026-03-12T13:28:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.337048 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{
\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.345117 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: E0312 13:28:09.345195 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.379146 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.419145 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.465341 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
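Every status patch in this stretch of the log fails the same way: the kubelet's POST to the network-node-identity admission webhook at https://127.0.0.1:9743 is rejected during the TLS handshake because the webhook's serving certificate expired on 2024-12-26T00:46:02Z while the node clock reads 2026-03-12. Below is a minimal sketch (with a hypothetical certificate path) of the validity-window check that Go's crypto/x509 applies; until the certificate is rotated, every admission call fails identically, which is why the same error repeats for each pod.

```go
// Minimal sketch of the validity-window check crypto/x509 performs during
// verification. The webhook serving certificate in this log has NotAfter
// 2024-12-26T00:46:02Z, so any handshake attempted at a 2026 wall-clock time
// fails before the request body is ever sent.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/tmp/webhook-serving-cert.pem") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now().UTC()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("x509: certificate is not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("x509: certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}
```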
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.510397 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
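The "Unable to update node status" err="update node status exceeds retry count" entry near the top of this section is the terminal form of the same failure: the kubelet retries the node-status update a fixed number of times per sync loop and gives up once every attempt hits the expired-certificate error. A sketch of that bounded-retry shape follows; the retry constant and function bodies are illustrative stand-ins, not kubelet source.

```go
// Bounded retry: attempt the node-status update a fixed number of times and
// surface "update node status exceeds retry count" when all attempts fail.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // illustrative; kubelet uses a small fixed count

func tryUpdateNodeStatus() error {
	// Stand-in for the PATCH that the expired webhook certificate rejects.
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io"`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err == nil {
			return nil
		}
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}
```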
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.557335 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.591140 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.634941 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.668481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.708655 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.748584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.781765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.820134 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.873925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.890127 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.890274 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.901420 4125 
status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.937456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:09 crc kubenswrapper[4125]: I0312 13:28:09.986670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.025010 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.025184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.025232 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.025547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.025733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.068963 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.100175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers 
with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.140420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
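The "Error syncing pod, skipping" entries above report NetworkReady=false because no CNI configuration exists yet in /etc/kubernetes/cni/net.d/, so the kubelet cannot create new pod sandboxes (hence "No sandbox for pod can be found. Need to start a new one" immediately followed by the sync failure). A rough illustration of that readiness condition follows; the real libcni scan is more involved, and the glob patterns here are a sketch.

```go
// Readiness condition behind NetworkPluginNotReady: the runtime looks for a
// usable network config under the CNI conf dir and keeps the node network
// "not ready" while none exists.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cniConfigPresent(dir string) bool {
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		if matches, _ := filepath.Glob(filepath.Join(dir, pattern)); len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d"
	if !cniConfigPresent(dir) {
		fmt.Printf("network is not ready: No CNI configuration file in %s/. Has your network provider started?\n", dir)
		os.Exit(1)
	}
	fmt.Println("NetworkReady=true")
}
```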
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.189928 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.228537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.281486 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.313467 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.346447 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.392454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.397327 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.397720 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.398125 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.398022117 +0000 UTC m=+492.721408446 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.433144 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.470412 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.516725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.552767 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.589961 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.607981 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.608179 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.608287 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.608262794 +0000 UTC m=+492.931648863 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.608301 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.608390 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.608366378 +0000 UTC m=+492.931752617 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.608187 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.608616 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.608686 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.608741 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.608916 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.608968 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609029 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609118 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 
13:28:10.609154 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609311 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609351 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609379 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609304 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609464 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609483 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609936 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.609777023 +0000 UTC m=+492.933163412 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.609994 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.609971489 +0000 UTC m=+492.933357658 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.610027 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.610013461 +0000 UTC m=+492.933399640 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.610055 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.610042642 +0000 UTC m=+492.933428821 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.610420 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.610499 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.610477115 +0000 UTC m=+492.933863354 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.610935 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.611253 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.611334 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.611314742 +0000 UTC m=+492.934701061 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.611593 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.611969 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.612478 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.612449689 +0000 UTC m=+492.935836248 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.612596 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.613011 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.613175 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.613150781 +0000 UTC m=+492.936537160 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.613416 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.613743 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.613943 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.613920366 +0000 UTC m=+492.937306705 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.614235 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.614592 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.614693 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.614728 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.614910 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.614784572 +0000 UTC m=+492.938170911 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.623199 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.623500 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.623653 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.623742 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.623963 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.624111 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.624387 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.624559 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.624708 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.626775 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627130 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.627030312 +0000 UTC m=+492.950416705 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627134 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627336 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627389 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627333 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.627311361 +0000 UTC m=+492.950697610 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627460 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627486 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627477 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.6274525 +0000 UTC m=+492.950838679 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627489 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627560 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.627530978 +0000 UTC m=+492.950917297 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627588 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627607 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627641 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.627621496 +0000 UTC m=+492.951007745 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.627679 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.627663167 +0000 UTC m=+492.951049346 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.628041 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.627700009 +0000 UTC m=+492.951086358 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.628387 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.628485 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.628464268 +0000 UTC m=+492.951850607 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.632321 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.632497 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.633513 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.633790 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.634043 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.634228 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.634326 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.634457 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.635058 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.635306 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.636329 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.638335 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640278 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640362 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.640406 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640477 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.640513 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client 
podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.640489232 +0000 UTC m=+492.963875481 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640583 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640674 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.640747 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.641028 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.641219 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.641330 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.641291843 +0000 UTC m=+492.964678162 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.641383 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.641464 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.641445061 +0000 UTC m=+492.964831300 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.641531 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.641577 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.641663 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.641643814 +0000 UTC m=+492.965030093 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.641665 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.641746 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.641761 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.641965 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642026 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642124 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642193 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642035 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642013666 +0000 UTC m=+492.965399945 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642280 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:42.642252374 +0000 UTC m=+492.965638813 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642316 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642300275 +0000 UTC m=+492.965686564 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642349 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642391 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642238 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642433 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642350 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642334766 +0000 UTC m=+492.965720945 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642528 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642511672 +0000 UTC m=+492.965897951 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642558 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642566 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.642582 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642637 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642606864 +0000 UTC m=+492.965993213 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642677 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642658495 +0000 UTC m=+492.966044794 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642706 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642729 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642709 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642690096 +0000 UTC m=+492.966076135 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642771 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642756193 +0000 UTC m=+492.966142298 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642775 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642883 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.642886 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643191 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.642315 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643309 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643320 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643201 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643416 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643449 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 
13:28:10.643503 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643908 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.642868422 +0000 UTC m=+492.967269123 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643947 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.643932626 +0000 UTC m=+492.967318525 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.643980 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.643959297 +0000 UTC m=+492.967345186 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.644002 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.643992508 +0000 UTC m=+492.967378517 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.644023 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.644012808 +0000 UTC m=+492.967398697 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.644551 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.644668 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.644771 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645230 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645427 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645475 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645520 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645563 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: 
\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645615 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645721 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645921 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.645976 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646119 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646266 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646328 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646444 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646521 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646692 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646739 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.646798 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647019 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647119 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647065406 +0000 UTC m=+492.970484887 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647160 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647146649 +0000 UTC m=+492.970532538 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647196 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.64718467 +0000 UTC m=+492.970570559 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647225 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647211441 +0000 UTC m=+492.970597460 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647267 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647238202 +0000 UTC m=+492.970624191 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647303 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647283743 +0000 UTC m=+492.970669742 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647339 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647321915 +0000 UTC m=+492.970708044 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.647371 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.647353506 +0000 UTC m=+492.970739395 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648110 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648061398 +0000 UTC m=+492.971447287 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648191 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648249 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648237623 +0000 UTC m=+492.971623552 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648245 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648337 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648393 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648359109 +0000 UTC m=+492.971745138 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648425 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648412761 +0000 UTC m=+492.971798760 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648445 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648487 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648474101 +0000 UTC m=+492.971860141 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648519 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648543 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648570 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648556795 +0000 UTC m=+492.971942724 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648595 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648585016 +0000 UTC m=+492.971970895 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648622 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648663 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648653197 +0000 UTC m=+492.972039116 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648667 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648712 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648695569 +0000 UTC m=+492.972081498 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648719 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648762 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.64875151 +0000 UTC m=+492.972137419 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648771 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648895 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648902 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648802283 +0000 UTC m=+492.972188202 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648970 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.648953627 +0000 UTC m=+492.972339636 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.648971 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649037 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649055 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.649029149 +0000 UTC m=+492.972415498 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649133 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.649119513 +0000 UTC m=+492.972505692 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649204 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649226 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649269 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.649253626 +0000 UTC m=+492.972639665 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649296 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.649283127 +0000 UTC m=+492.972669016 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649300 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649347 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.64933541 +0000 UTC m=+492.972721339 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649367 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649413 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.6494017 +0000 UTC m=+492.972787619 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649432 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649449 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649474 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.649460414 +0000 UTC m=+492.972846343 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.649493 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.649483673 +0000 UTC m=+492.972869852 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.658200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.682238 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.705200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.740310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.749502 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.749998 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.750146 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.750178 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.750563 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.750799 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.750901 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.750659 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.750591561 +0000 UTC m=+493.073977570 (durationBeforeRetry 32s). 
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.750940 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.751121 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.751104208 +0000 UTC m=+493.074490487 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.751997 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752114 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752162 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752200 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752225 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752246 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752254 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752288 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752308 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.752289475 +0000 UTC m=+493.075675364 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752362 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752370 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752403 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.752389108 +0000 UTC m=+493.075775017 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752425 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752452 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752483 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752498 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752464 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.75245226 +0000 UTC m=+493.075838229 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752542 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752578 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.752550773 +0000 UTC m=+493.075936552 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752611 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752613 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752653 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.752644876 +0000 UTC m=+493.076030655 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752705 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752740 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752752 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.752770 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752774 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752798 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert
podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.752784701 +0000 UTC m=+493.076170590 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752884 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752800 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752914 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752931 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.752894 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.752872443 +0000 UTC m=+493.076258302 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.753043 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753056 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753101 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753141 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:42.753128941 +0000 UTC m=+493.076514810 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753161 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.753154602 +0000 UTC m=+493.076540501 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753177 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.753168683 +0000 UTC m=+493.076555012 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753216 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.753208674 +0000 UTC m=+493.076594603 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.753291 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.753476 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.753891 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753957 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753979 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.753993 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754003 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754040 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.754013829 +0000 UTC m=+493.077400148 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754132 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:42.75406048 +0000 UTC m=+493.077446719 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754181 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.754169015 +0000 UTC m=+493.077554954 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.754460 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754469 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.754454694 +0000 UTC m=+493.077840553 (durationBeforeRetry 32s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754614 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.754672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.754658911 +0000 UTC m=+493.078044940 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.754926 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755124 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755190 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755264 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755302 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755319 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755395 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755337 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755444 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755399 4125 secret.go:194] 
Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755473 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755486 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755496 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755399 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.755380843 +0000 UTC m=+493.078767142 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755522 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755538 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.755523987 +0000 UTC m=+493.078909876 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755615 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755658 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755729 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.755755 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755801 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.755780747 +0000 UTC m=+493.079166736 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755871 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755894 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:42.75587952 +0000 UTC m=+493.079265529 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755916 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.755907439 +0000 UTC m=+493.079293318 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755541 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755957 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755973 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.755934 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.75592642 +0000 UTC m=+493.079312169 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756001 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.755994602 +0000 UTC m=+493.079380381 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756032 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756039 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756047 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756058 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756115 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756054 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756048503 +0000 UTC m=+493.079434402 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756154 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756174 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756195 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756203 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756195128 +0000 UTC m=+493.079580907 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756261 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756272 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756274 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756280 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756319 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756311562 +0000 UTC m=+493.079697311 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756329 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756335 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756328802 +0000 UTC m=+493.079714671 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756356 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756347443 +0000 UTC m=+493.079733202 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756371 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756365913 +0000 UTC m=+493.079751702 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756397 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756430 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756443 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756459 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756468 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756488 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756500 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756547 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756526158 +0000 UTC m=+493.079912477 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756560 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756586 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.75657967 +0000 UTC m=+493.079965449 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756622 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756661 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756680 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.756714 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756730 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756715174 +0000 UTC m=+493.080101473 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756764 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756791 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756785187 +0000 UTC m=+493.080170936 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756890 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756911 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756919 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756942 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756951 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756958 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756970 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756977 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object 
"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756986 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756975422 +0000 UTC m=+493.080361291 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.756921 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.757006 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.756998104 +0000 UTC m=+493.080383973 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.757024 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.757017114 +0000 UTC m=+493.080402893 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.757043 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.757130 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.757158 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.757220 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.75720282 +0000 UTC m=+493.080589069 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.776780 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
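Note on the records above: every kube-api-access-* volume is a projected volume the kubelet assembles from the pod's ServiceAccount token plus the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps in the pod's namespace. The "object ... not registered" failures indicate the kubelet's local object cache has not (re)registered those ConfigMaps and Secrets, which is consistent with a kubelet that restarted a few minutes earlier. A minimal diagnostic sketch, assuming a reachable API server and an illustrative kubeconfig path, that asks the API server directly whether the backing ConfigMaps exist (this is client-go diagnostic code, not the kubelet's own logic):

// check-registered.go: verify that the ConfigMaps named in the mount errors
// actually exist server-side. Kubeconfig path and namespace list are
// illustrative assumptions for this log.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// The same two ConfigMaps back every kube-api-access-* projected volume.
	for _, ns := range []string{"openshift-operator-lifecycle-manager", "openshift-machine-api"} {
		for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
			_, err := client.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{})
			fmt.Printf("%s/%s: err=%v\n", ns, name, err)
		}
	}
}

If the objects exist server-side while the kubelet keeps logging "not registered", the gap is in the kubelet's informer cache rather than in the API objects themselves.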
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.776780 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.817743 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z"
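The two status-update failures above share one root cause: the kubelet's status patch is rejected because the network-node-identity webhook serves a certificate that expired on 2024-12-26, while the node clock reads 2026-03-12. The failing check is the standard x509 validity-window comparison; a minimal stdlib sketch that reproduces the same "expired or is not yet valid" decision (the PEM file path is an illustrative assumption):

// cert-expiry.go: the NotBefore/NotAfter check behind the webhook failure above.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/tmp/webhook-serving-cert.pem") // assumed path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}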
\"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.857889 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.857944 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.857972 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.857985 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858125 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858148 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858159 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858236 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858248 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858255 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858310 4125 
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858322 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858331 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.857997 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858130 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.858103931 +0000 UTC m=+493.181490030 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858380 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858523 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.858485793 +0000 UTC m=+493.181872052 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858572 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.858555815 +0000 UTC m=+493.181942064 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858605 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.858589836 +0000 UTC m=+493.181976085 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.858773 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.858803 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.858781112 +0000 UTC m=+493.182166961 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.858976 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859030 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859111 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859187 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859214 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859208 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859245 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859251 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859262 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859270 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object 
"openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859283 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859285 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859326 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.859310119 +0000 UTC m=+493.182695898 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859333 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859285 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859359 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.85933995 +0000 UTC m=+493.182726269 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859393 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859441 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859513 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859443 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.859427323 +0000 UTC m=+493.182813292 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859548 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.859539376 +0000 UTC m=+493.182925125 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859579 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.859559277 +0000 UTC m=+493.182945536 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859638 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859688 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859697 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859706 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859718 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859769 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.859760133 +0000 UTC m=+493.183146032 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.859853 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859908 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:28:42.859900877 +0000 UTC m=+493.183286626 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.859947 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860024 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.860006602 +0000 UTC m=+493.183392911 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860056 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860113 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860128 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860157 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.860149275 +0000 UTC m=+493.183535184 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.860161 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860200 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860211 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860219 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860242 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.860235548 +0000 UTC m=+493.183621457 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.860284 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860312 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860333 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860386 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.860370453 +0000 UTC m=+493.183756742 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860409 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860439 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.860432405 +0000 UTC m=+493.183818324 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.860464 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860530 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860575 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.86056511 +0000 UTC m=+493.183951059 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860676 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860695 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860704 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.860770 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.860760305 +0000 UTC m=+493.184146194 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.860986 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.861121 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.861274 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.861283 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.861331 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.861321022 +0000 UTC m=+493.184706911 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.861671 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.861926 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.861965 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862039 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.861972 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.861960642 +0000 UTC m=+493.185346581 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862054 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862100 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862177 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862279 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862294 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862303 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862335 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.862325815 +0000 UTC m=+493.185712004 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862388 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.862371625 +0000 UTC m=+493.185757414 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862424 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862545 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862575 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862601 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862604 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862632 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862642 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.862632104 +0000 UTC m=+493.186017883 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862695 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862701 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862709 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862718 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.862748 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862755 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.862744027 +0000 UTC m=+493.186129806 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862797 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862868 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862882 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862912 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.862903813 +0000 UTC m=+493.186289762 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862965 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862968 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862976 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.862992 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863015 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.863002335 +0000 UTC m=+493.186388214 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863102 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.863090108 +0000 UTC m=+493.186476187 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863118 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863155 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.86314661 +0000 UTC m=+493.186532519 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863051 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863178 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863186 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.863210 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.863203823 +0000 UTC m=+493.186589592 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.881619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.886792 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.886884 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.913399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.946013 4125 status_manager.go:877] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"
startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.964686 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.964938 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965410 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.965494 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965504 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965543 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965638 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.96560937 +0000 UTC m=+493.288995609 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965747 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965780 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965795 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.965939 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.965918939 +0000 UTC m=+493.289304888 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.966317 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.966362 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.966382 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: E0312 13:28:10.966618 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:28:42.96658957 +0000 UTC m=+493.289975539 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:10 crc kubenswrapper[4125]: I0312 13:28:10.987688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-
03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.026145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.026209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.026170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025686 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.026465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.026656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.026687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.026762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.026952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.027016 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.027161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.027367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.027477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.027377 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025932 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025961 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.026015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.027430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.027564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.025720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.028576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.028966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.028978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.029352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.029368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.029624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.029890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.030131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.030167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.030366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.030505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.030587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.030732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.030622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.031262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.031425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.031569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.031767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.032474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.032542 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.032767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.033314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.033488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.033623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.033656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.033958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.034267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.034412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.034599 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.034645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.034780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.034992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.035042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.035315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.035142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.035187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.035913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.035940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.036188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.036293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.036435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.036544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.036613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.036711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.036777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.036923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.037758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.038169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.038398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.038607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.038982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:11 crc kubenswrapper[4125]: E0312 13:28:11.039128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.066588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.105457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator 
cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.152910 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller 
pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.181559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.223299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.273204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"
containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2b
a631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.304935 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.890179 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:11 crc kubenswrapper[4125]: I0312 13:28:11.891520 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.025456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.025727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:12 crc kubenswrapper[4125]: E0312 13:28:12.026131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:12 crc kubenswrapper[4125]: E0312 13:28:12.026408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.059003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.096683 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.126641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.156238 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: E0312 13:28:12.191044 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.191721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.232644 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.264133 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.298664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.339993 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.366583 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.387417 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.413589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.431967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.449641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.476284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.493201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.513803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.535923 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.575290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.614463 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.644802 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.682222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b065562fefc63a381832e1073dc188f7f27d20b65780f1c54a9aa34c767a3b80\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:28:38Z\\\",\\\"message\\\":\\\"Thu Jun 27 13:21:15 UTC 2024\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:21:14Z\\\"}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.706580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.737948 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.763784 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.783349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.808927 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.829126 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.861641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.881456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.890480 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.890603 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.903621 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.937200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.961158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:12 crc kubenswrapper[4125]: I0312 13:28:12.984634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.006449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.025606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025418 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.025757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026129 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.025920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025984 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.025325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.026651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.026550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.026735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.027193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.027229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.027286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.027333 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.027380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.027620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.028194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.028199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.028309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.028212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.028431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.028484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.028606 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.028718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.028741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.028748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.028632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.028955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.029024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.029587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.029777 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.029993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.030135 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.030285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.030384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.030463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.030717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.030925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.030962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.030954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.031156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.031955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.032063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.032214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.032249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.032384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.032589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.032805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.033230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.033394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.033519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.033703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.033921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.034054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.034228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.037174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.037471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:13 crc kubenswrapper[4125]: E0312 13:28:13.037623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.038941 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.065694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.096471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.115875 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.138251 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.164745 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.188467 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.213330 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.235262 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.256451 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.280007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.304495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.349347 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.415979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.457732 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.502044 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.524619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.541976 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.558409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.577211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.596742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.639489 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.677660 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.705337 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.726323 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.743409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.786553 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.820689 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.865066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.886020 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.886243 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.898554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: I0312 13:28:13.949285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de919
3fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:13 crc kubenswrapper[4125]: 
I0312 13:28:13.983790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.025706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:14 crc kubenswrapper[4125]: E0312 13:28:14.026497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.026742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:14 crc kubenswrapper[4125]: E0312 13:28:14.027066 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.886484 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.886577 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.933248 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" event={"ID":"2b6d14a5-ca00-40c7-af7a-051a98a24eed","Type":"ContainerStarted","Data":"0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e"} Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.969392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:14 crc kubenswrapper[4125]: I0312 13:28:14.998629 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.025602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.025733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.025986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026069 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.025994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.026219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026232 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.026378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.026578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.026794 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.027048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.027178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.027246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.027475 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.027705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.027929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.028047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.028424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028564 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.028778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.028959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.029291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.029424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.029484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029527 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.029599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.029717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.029918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.029802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.030060 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.030317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.030431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.030447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.030474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.030493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.030527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.030898 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.030965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.031037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.031128 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.031194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.031264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.031409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.031421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.031588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.031714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.031787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.032011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.032266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.032643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.032672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.032963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.033065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.033237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.033338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.033496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.033683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.033804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.034169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.034335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.034480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.034643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.034786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.035065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.035300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.035464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.035624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.035747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.036140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.036510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.036519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:15 crc kubenswrapper[4125]: E0312 13:28:15.036768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.043705 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.071564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.113143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.142533 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.165777 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.208880 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.237547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.262465 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.311938 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.343290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.369731 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.400378 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.437559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.470023 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.496702 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.522938 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.542771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.564889 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.589007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.607682 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.626719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.649254 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.678733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.707021 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.738604 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.767336 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.800460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.826284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.850985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.877300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.887513 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.887636 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.903003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.941551 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:15 crc kubenswrapper[4125]: I0312 13:28:15.968703 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.002928 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.025561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:16 crc kubenswrapper[4125]: E0312 13:28:16.025802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.026010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:16 crc kubenswrapper[4125]: E0312 13:28:16.026434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.049039 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"image
ID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.074269 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.101779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.131444 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.151927 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.177540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.198577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.221133 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.263931 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.286801 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.319728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.353257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.387512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.424305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.466738 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.501124 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.535758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.572195 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.597776 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.622688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.653177 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.683316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.718277 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.740806 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.761581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.784909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.800992 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.839468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.880631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.889156 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.889240 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.907742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:16 crc kubenswrapper[4125]: I0312 13:28:16.936669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610
db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.025159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.025293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.025432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.025531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.025605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.025623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.026045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026224 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.026433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.026614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026738 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.026964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027167 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.026227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.027333 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027771 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.027860 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.027958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.027502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.027757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.028054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.028305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.028458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.028622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.028768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.029026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.029370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.029611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.029977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.030144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.030322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.030329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.030402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.030527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.030689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.030751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.031064 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.031216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.031322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.031479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.031542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.031723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.031988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.032320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.032579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.032701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.033190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.033400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.033544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.033544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.033413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.033771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.033949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.034402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034859 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.034948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.035011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.035069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.035177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.035225 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.035292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.035373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.035434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.036636 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.037061 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:28:17 crc kubenswrapper[4125]: E0312 13:28:17.196394 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.888918 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:17 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:17 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:17 crc kubenswrapper[4125]: I0312 13:28:17.889133 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:18 crc kubenswrapper[4125]: I0312 13:28:18.025629 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:18 crc kubenswrapper[4125]: I0312 13:28:18.025911 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:18 crc kubenswrapper[4125]: E0312 13:28:18.026483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:18 crc kubenswrapper[4125]: E0312 13:28:18.027341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:18 crc kubenswrapper[4125]: I0312 13:28:18.030037 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:28:18 crc kubenswrapper[4125]: E0312 13:28:18.032751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:28:18 crc kubenswrapper[4125]: I0312 13:28:18.888803 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:18 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:18 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:18 crc kubenswrapper[4125]: I0312 13:28:18.889162 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.025762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.025985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.026226 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.026367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.027520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.027752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.027947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.028157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.028591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.028740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.028943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.029079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029373 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.029416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.029614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.029982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.029984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.030073 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.030076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.030184 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.030431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.030634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.030746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.030799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.031058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.031386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.031390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.031520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.031574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.031537 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.031753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.032072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.032173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.032076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.032366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.032546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.032624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.032677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.032984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.033034 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.033149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.033251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.033299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.033193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.033266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.033396 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.033540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.033610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.033967 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.033982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.034226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.034387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.034545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.034708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.035197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.035490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.036187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.036232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.036329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.036353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.036505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.036681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.037055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.037484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.037699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.038154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.038325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.038551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.039079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.039377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.039656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.040068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.569197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.569412 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.569451 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.569501 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.569559 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:19Z","lastTransitionTime":"2026-03-12T13:28:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.605743 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:19Z is after 2024-12-26T00:46:02Z"
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:19 crc kubenswrapper[4125]: E0312 13:28:19.769077 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.889265 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:19 crc kubenswrapper[4125]: I0312 13:28:19.889444 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:20 crc kubenswrapper[4125]: I0312 13:28:20.025283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:20 crc kubenswrapper[4125]: I0312 13:28:20.025334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:20 crc kubenswrapper[4125]: E0312 13:28:20.026781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:20 crc kubenswrapper[4125]: E0312 13:28:20.026920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:20 crc kubenswrapper[4125]: I0312 13:28:20.887888 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:20 crc kubenswrapper[4125]: I0312 13:28:20.888044 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.024985 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.025568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.025791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025540 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.025976 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.026184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.026432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.026552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.026647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.026745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.027047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.027357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.027524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.027718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.028054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.028161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.028365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.028373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.028434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.028571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.028923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.029146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.029312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.029404 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.029545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.029667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.029798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.030007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.030230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.030236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.030280 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.030366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.030453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.030517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.030540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.030720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.031332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.031555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.031741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.031743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.032485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.032656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.032747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.033262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.033481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.033687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.033986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.033989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.034227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.034579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.034996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.035285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.034963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.035387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.035439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.035572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.035681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.035928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.036204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.036347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.036500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.036697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.036769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.037045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.037308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.037757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.038217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:21 crc kubenswrapper[4125]: E0312 13:28:21.038309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.888341 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:21 crc kubenswrapper[4125]: I0312 13:28:21.889198 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.029155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.029411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:22 crc kubenswrapper[4125]: E0312 13:28:22.029543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:22 crc kubenswrapper[4125]: E0312 13:28:22.029745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.084149 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985
210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc 
kubenswrapper[4125]: I0312 13:28:22.123249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.156205 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.188742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: E0312 13:28:22.198183 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
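
Every "Failed to update status for pod" record above fails the same way: the kubelet's status PATCH is intercepted by the validating webhook "pod.network-node-identity.openshift.io" at https://127.0.0.1:9743/pod, and the TLS handshake is rejected because the webhook's serving certificate expired on 2024-12-26T00:46:02Z while the node clock reads 2026-03-12T13:28:22Z. The same check the kubelet's client performs can be reproduced by hand; the following is a minimal Go sketch (an editor's illustration, not kubelet code), assuming it is run on the affected node while the webhook is still listening on the endpoint taken from the log:

// Sketch: inspect the validity window of the certificate served on
// 127.0.0.1:9743 (endpoint and timestamps taken from the log records above).
// InsecureSkipVerify lets the handshake complete so we can read the expired
// certificate, instead of failing the way the kubelet's verified client did.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if now.After(cert.NotAfter) {
		// Matches the log: current time 2026-03-12T13:28:22Z is after
		// 2024-12-26T00:46:02Z.
		fmt.Println("certificate has expired")
	}
}

Against a live endpoint this prints the validity window and, for the certificate above, "certificate has expired"; the equivalent inspection without Go is openssl s_client -connect 127.0.0.1:9743 </dev/null | openssl x509 -noout -dates. The "Container runtime network not ready ... No CNI configuration file in /etc/kubernetes/cni/net.d/" record immediately above is consistent with the same outage: the ovnkube-node pod that would write that CNI configuration reports ovnkube-controller as ready:false, so the runtime keeps reporting NetworkPluginNotReady while the cluster's certificates remain expired.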
Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.217806 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror 
starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.253257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020e
d338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.288082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.317324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.349581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.366649 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.387699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.404316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.421033 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.445478 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.464251 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.483177 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.505133 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.530028 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.551447 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.573782 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.594275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.626937 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.649269 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.673419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.699521 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.718920 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.739239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.771679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.798664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.829287 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.845877 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.864111 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.879990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.891523 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.891654 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.902281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.919371 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.935904 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.951448 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.967406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:22 crc kubenswrapper[4125]: I0312 13:28:22.985286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.005667 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.026081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.026364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025349 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025516 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025733 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.026890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.025795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.026519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.026657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.027022 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.027156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.027418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.027647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.027761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.027938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.028774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029355 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.029955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.030964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031071 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.031920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.032035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:23 crc kubenswrapper[4125]: E0312 13:28:23.032766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.033376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.048763 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.064630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.083080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.099455 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.117672 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.134550 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.149226 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.166197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.203485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.227228 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.244035 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.263059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.283518 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.306765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod 
\"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.330031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.347223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.368184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.400531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.421785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.438917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.461054 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.481776 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.504279 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.527046 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.549695 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.574027 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:23Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.899513 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:23 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:23 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:23 crc kubenswrapper[4125]: I0312 13:28:23.900472 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:24 crc kubenswrapper[4125]: I0312 13:28:24.025687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:28:24 crc kubenswrapper[4125]: I0312 13:28:24.026382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:28:24 crc kubenswrapper[4125]: E0312 13:28:24.033573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:28:24 crc kubenswrapper[4125]: E0312 13:28:24.034460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:28:24 crc kubenswrapper[4125]: I0312 13:28:24.888618 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:24 crc kubenswrapper[4125]: I0312 13:28:24.889288 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.025998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026206 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.026433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026051 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.026599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026868 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026928 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.027305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.027406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027470 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.027700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026928 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.026972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.027170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.026921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.027007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.027979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.028883 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.029003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.029191 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.029718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029876 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.029941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.029674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030589 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030023 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030289 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.030368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.030446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.031506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.032167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.032284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.032335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.032390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.032570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.032774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.033048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.033336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.033517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.033671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.033954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.034159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.034395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.034537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.034673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.034992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.035322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.035440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.035692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.035761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.036021 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:25 crc kubenswrapper[4125]: E0312 13:28:25.036247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.887190 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:25 crc kubenswrapper[4125]: I0312 13:28:25.888508 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:26 crc kubenswrapper[4125]: I0312 13:28:26.025944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:26 crc kubenswrapper[4125]: E0312 13:28:26.026543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:26 crc kubenswrapper[4125]: I0312 13:28:26.026739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:26 crc kubenswrapper[4125]: E0312 13:28:26.027130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:26 crc kubenswrapper[4125]: I0312 13:28:26.888310 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:26 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:26 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:26 crc kubenswrapper[4125]: I0312 13:28:26.889800 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.025224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.025333 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.025474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.025549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.025577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.025680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.025955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.026018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.026221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.026352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.026374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.026498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.026637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.026643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.026759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.026985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027280 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027327 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.027467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.027585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.027963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028036 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.028270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028462 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.028583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.027284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.028774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.029018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.029163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.029271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.029328 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.029408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.029475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.029645 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.029658 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.029899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.029994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.030199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.030347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.030469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.030627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.030991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.031198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.031263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.031533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.031744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.031971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.032155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.032308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.032325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.032418 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.032646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.032677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.032741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.032952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.033005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.033229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.033327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.033455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.033513 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.033656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.033671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.033936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.034177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.034310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.034379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.034472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.034603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.034722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.034801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.035072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.035088 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.035356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.035748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.036196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.036236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.036339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.036566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.036769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.037076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.037297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.037535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.037672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:27 crc kubenswrapper[4125]: E0312 13:28:27.199772 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.887961 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:27 crc kubenswrapper[4125]: I0312 13:28:27.888185 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:28 crc kubenswrapper[4125]: I0312 13:28:28.025351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:28 crc kubenswrapper[4125]: E0312 13:28:28.025756 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:28 crc kubenswrapper[4125]: I0312 13:28:28.026077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:28 crc kubenswrapper[4125]: E0312 13:28:28.026783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:28 crc kubenswrapper[4125]: I0312 13:28:28.888227 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:28 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:28 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:28 crc kubenswrapper[4125]: I0312 13:28:28.888449 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.025412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.025552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.025600 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.025672 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.025743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.025946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.026018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.026399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026641 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.026590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.026586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.027162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.027344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.027632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027673 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.027773 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.027785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.028256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.028682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.028694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.029033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.029193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.029428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.029733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.029985 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.030225 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.030343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.030600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.030731 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.030924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.030980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.031244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.031287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.031385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.031467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.031658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.031711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.031926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.032053 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.032148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.032294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.032445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.032447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.032531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.032569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.032597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.032690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.033152 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.033365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.033933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.034231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.034505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.034753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.034939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.035062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.035328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.035653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.035963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.036055 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.036252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.036308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.036426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.036589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.036777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.037058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.037307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.037631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.037795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.845720 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.845938 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.845975 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.846010 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.846054 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:29Z","lastTransitionTime":"2026-03-12T13:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.875199 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.884170 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.884272 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.884307 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.884347 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.884386 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:29Z","lastTransitionTime":"2026-03-12T13:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.887722 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:29 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:29 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.887968 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.954519 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.954588 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.954611 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.954637 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.954670 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:29Z","lastTransitionTime":"2026-03-12T13:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:29 crc kubenswrapper[4125]: E0312 13:28:29.980078 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.987632 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.987930 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.987966 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.988009 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:29 crc kubenswrapper[4125]: I0312 13:28:29.988052 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:29Z","lastTransitionTime":"2026-03-12T13:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:30 crc kubenswrapper[4125]: E0312 13:28:30.017553 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:30 crc kubenswrapper[4125]: E0312 13:28:30.017722 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:28:30 crc kubenswrapper[4125]: I0312 13:28:30.025276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:30 crc kubenswrapper[4125]: I0312 13:28:30.025450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:30 crc kubenswrapper[4125]: E0312 13:28:30.026059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:30 crc kubenswrapper[4125]: E0312 13:28:30.026237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:30 crc kubenswrapper[4125]: I0312 13:28:30.890730 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:30 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:30 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:30 crc kubenswrapper[4125]: I0312 13:28:30.891914 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.026529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.026983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.027054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.027072 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.027186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.027410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.028483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.028610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.028735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.028800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.029009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029034 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.029297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029381 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.029668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.029977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.030278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.030725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.030728 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.030948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.032004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031023 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.032057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031071 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031203 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.031474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.031771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.034348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.034445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.034586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.034982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.035077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.035228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.035458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.035402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.035781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.036189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.035737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.036416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.036665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.037004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.037364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.037609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.038036 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.038373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.038543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.038689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.038953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.039403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.039575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.039620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.039721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.039783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.040072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.040462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.040737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.041094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.041512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.042210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.042241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.042405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.042558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.042697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.043217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:31 crc kubenswrapper[4125]: E0312 13:28:31.043499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.418053 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.418373 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.418493 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.418604 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.418677 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.941696 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:31 crc kubenswrapper[4125]: I0312 13:28:31.941803 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.025255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:32 crc kubenswrapper[4125]: E0312 13:28:32.025594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.025984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:32 crc kubenswrapper[4125]: E0312 13:28:32.026206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.050383 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.070991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.103968 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.128419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.164559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.189905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: E0312 13:28:32.202137 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.212378 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.232351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.251026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.268547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.297748 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.327526 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.348689 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.371259 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.393253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.416919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.450089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.468964 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.489398 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.509549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.536596 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.558276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.597967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.634178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.655229 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.699229 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.735351 4125 status_manager.go:877] "Failed to update status for pod"
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.768956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.806457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.849075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.882069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.890165 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:32 crc 
kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.890317 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.916004 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server 
(\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.950787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:32 crc kubenswrapper[4125]: I0312 13:28:32.974955 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.009436 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.027231 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.027458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.027654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.027295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.027390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.028953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029017 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.029992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.030147 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.030271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.030363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.030683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.030744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.030936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031001 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.031062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.031242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.031339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.031433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.031581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.031623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.032062 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.032362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.032364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.032426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.032877 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.032912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.032972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.032989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.033034 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.033155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.033243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.033286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.033584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.033700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.034029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.034327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.034569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.034760 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30
f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.034967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false 
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.035035 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.035317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.035444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.035584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.035696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.035797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.036690 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.036941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.037237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.037450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.037476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.037481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.037622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.037657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.039078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.039423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.039659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.039799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:33 crc kubenswrapper[4125]: E0312 13:28:33.040969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.051579 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.070781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.108672 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.132546 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.162674 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.190802 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.253341 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.287570 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.310051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.331432 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.355188 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.377474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.397602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.425743 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.447332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.469009 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.501328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.528624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.563380 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.605045 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.639453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.669144 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.698070 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.725672 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.769540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.814333 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.845573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.876270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.887790 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:33 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.888480 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.907969 4125 status_manager.go:877] "Failed 
to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.938943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:33 crc kubenswrapper[4125]: I0312 13:28:33.964404 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:34 crc kubenswrapper[4125]: I0312 13:28:34.025510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:34 crc kubenswrapper[4125]: I0312 13:28:34.025959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:34 crc kubenswrapper[4125]: E0312 13:28:34.026947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:34 crc kubenswrapper[4125]: E0312 13:28:34.027219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:34 crc kubenswrapper[4125]: I0312 13:28:34.888357 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:34 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:34 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:34 crc kubenswrapper[4125]: I0312 13:28:34.888559 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.025635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.025767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.026221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026666 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.026912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.026940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.026674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.027025 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.027176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.027329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.027349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.027385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.027498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.027567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.027578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.028380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.028549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.028950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.030179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.030257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.030285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.030467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029932 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.030665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.029982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.030734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.031237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.031284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.031315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.031389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.031457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.031512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.031749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.031801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.032060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.032094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.032183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.032226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.032326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.032076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.032523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.032984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.033226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.033353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.033381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.033546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.033689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.034375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.036052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.034633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.034648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.034708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.034727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.035290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.036273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.035476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.036323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.035779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.036231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.036406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.036535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.036661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.036939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.036955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.037165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.037453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.038079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.038375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.038454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.038576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.039036 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.039158 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.039171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.038605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.039348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.039515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.039702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:35 crc kubenswrapper[4125]: E0312 13:28:35.040232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.886949 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:35 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:35 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:35 crc kubenswrapper[4125]: I0312 13:28:35.887939 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:36 crc kubenswrapper[4125]: I0312 13:28:36.025475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:36 crc kubenswrapper[4125]: I0312 13:28:36.025692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:36 crc kubenswrapper[4125]: E0312 13:28:36.027704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:36 crc kubenswrapper[4125]: E0312 13:28:36.028427 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:36 crc kubenswrapper[4125]: I0312 13:28:36.887990 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:36 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:36 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:36 crc kubenswrapper[4125]: I0312 13:28:36.888227 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.025939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.026338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026568 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.026584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.026968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027058 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027101 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.027277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.027438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.027660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.027744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.028047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.028405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.028531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.028764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.028769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.029191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.029412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.029505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.029796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.029974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.029977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.030079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.030265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.030313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.030430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.030441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.030715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.031167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.031375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.031446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.031558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.031670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.031750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.031983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.032056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.032320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.032417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.032497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.032649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.032678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.032782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.033265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.033284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.033493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.033792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.033741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.034337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.034451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.034510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.034676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.035081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.035319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.035506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.035717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.035803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.036046 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.036479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.036774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.037033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.037313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.037401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.037496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.037622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.037996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.038093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.038308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.038446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.038736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.039264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:37 crc kubenswrapper[4125]: E0312 13:28:37.205477 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
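
The run of entries above is a single failure signature repeated once per pod: kubelet cannot create any pod sandbox because no CNI configuration file exists in /etc/kubernetes/cni/net.d/, so the container runtime reports NetworkReady=false and every pod sync is skipped until the network plugin writes its config (the ovnkube-node probe entries further down suggest OVN-Kubernetes is that plugin here). When triaging a capture like this, collapsing the spam into a per-pod count makes the blast radius easier to see. A minimal sketch in Python, assuming the journal has been saved to a local file named kubelet.log (hypothetical path; the regex targets the exact entry format shown above):

import re
from collections import Counter

# Matches entries of the form seen in this log:
#   pod_workers.go:1298] "Error syncing pod, skipping"
#   err="network is not ready: ..." pod="<namespace>/<name>" podUID="..."
PAT = re.compile(
    r'"Error syncing pod, skipping" err="network is not ready:[^"]*" pod="([^"]+)"'
)

def blocked_pods(path="kubelet.log"):
    """Count CNI-blocked sync skips per pod (namespace/name)."""
    counts = Counter()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            counts.update(PAT.findall(line))
    return counts

if __name__ == "__main__":
    for pod, n in blocked_pods().most_common():
        print(f"{n:4d}  {pod}")

Every pod this prints is blocked by the same root cause, so the fix is never per-pod; the thing to investigate is why the network plugin has not laid down its CNI config (and, later in this log, the expired node-identity webhook certificate that keeps node status patches from landing).
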
Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.890190 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:37 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:37 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:37 crc kubenswrapper[4125]: I0312 13:28:37.890328 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:38 crc kubenswrapper[4125]: I0312 13:28:38.025514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:28:38 crc kubenswrapper[4125]: I0312 13:28:38.025686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:28:38 crc kubenswrapper[4125]: E0312 13:28:38.025766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:28:38 crc kubenswrapper[4125]: E0312 13:28:38.026283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:28:38 crc kubenswrapper[4125]: I0312 13:28:38.888436 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:38 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:38 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:38 crc kubenswrapper[4125]: I0312 13:28:38.888746 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.025518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.025597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.025627 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.025545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.026339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.026353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.026479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.026537 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.026609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.027246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.027541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.027569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.027677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.027763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.027939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.027974 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.028048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.028329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.028673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.028997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.029102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.029231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.029211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.029338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.029180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.029636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.030168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.030349 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.030512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.030917 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.031192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.031421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.031570 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.031719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.032046 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.032240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.032253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.032322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.032749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.032996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033105 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.033336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.033771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.033902 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.034089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.034298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.034480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.034555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.034596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.034657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.034725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.034732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.034915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.034920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.035073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.035269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.035306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.035382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.035557 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.035736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.035994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.036058 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.036474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.036912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.037083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.037375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.037946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.038187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.038261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.038284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.038422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.038586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:28:39 crc kubenswrapper[4125]: E0312 13:28:39.038431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.243397 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" probeResult="failure" output=""
Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.887422 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:39 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:39 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:39 crc kubenswrapper[4125]: I0312 13:28:39.887617 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.026186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.026242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.026494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.026781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.239419 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.239596 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.239647 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.239710 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.239774 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:40Z","lastTransitionTime":"2026-03-12T13:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.270094 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.279777 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.280031 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.280066 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.280104 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.280210 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:40Z","lastTransitionTime":"2026-03-12T13:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.310002 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.317737 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.317989 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.318039 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.318088 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.318231 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:40Z","lastTransitionTime":"2026-03-12T13:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.340786 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the previous attempt, elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.350067 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.350258 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.350297 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.350335 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.350405 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:40Z","lastTransitionTime":"2026-03-12T13:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.373976 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the previous attempt, elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.383777 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.384410 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.384675 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.385222 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.385742 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:40Z","lastTransitionTime":"2026-03-12T13:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.413988 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the previous attempt, elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:40 crc kubenswrapper[4125]: E0312 13:28:40.415005 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.888106 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:40 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:40 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:40 crc kubenswrapper[4125]: I0312 13:28:40.888347 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.025967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.026445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027109 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.027439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.027648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.027947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028030 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.028186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.028354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.028481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.028602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.028752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028806 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.028980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.029002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.029199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.029388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.029503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.029622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.029763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.029984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.030024 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.027060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.030166 4125 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.030090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.030345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.030411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.030472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.030519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.030564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.030632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.030795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.031268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.031527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.031674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.031920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.032152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.032162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.032368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.032583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.032690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.032962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.033224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.033405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.033609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.033789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.034063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.034343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.034522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.034693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.035255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.035343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.035459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.035571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.035731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.035954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.036045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.036264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:41 crc kubenswrapper[4125]: E0312 13:28:41.036362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.887686 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:41 crc kubenswrapper[4125]: I0312 13:28:41.887973 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.025503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.026000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.026187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.026482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.059571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":tr
ue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.099544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.133426 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
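The two status patches above (and every one that follows) die at the same spot: the pod.network-node-identity.openshift.io webhook served on 127.0.0.1:9743 presents a certificate that expired on 2024-12-26T00:46:02Z. A minimal sketch, not part of the log and assuming the endpoint is reachable from the node, that grabs the presented certificate so its validity window can be inspected (e.g. by piping the PEM through openssl x509 -noout -dates):

import socket
import ssl

HOST, PORT = "127.0.0.1", 9743  # webhook endpoint quoted in the errors above

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE  # the cert is expired, so skip verification

# Pull the peer certificate in DER form and dump it as PEM for inspection.
with socket.create_connection((HOST, PORT), timeout=5) as sock:
    with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
        der = tls.getpeercert(binary_form=True)

print(ssl.DER_cert_to_PEM_cert(der))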
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.165573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.207610 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.209447 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.241389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.277572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.305374 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.339064 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.374409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
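The same pair of timestamps is quoted in every webhook failure, which makes the scale of the problem easy to read off: the certificate is not marginally stale. A back-of-the-envelope check (not from the log) using the two quoted RFC 3339 timestamps:

from datetime import datetime, timezone

# Timestamps quoted in the x509 errors above (assumed UTC).
now = datetime(2026, 3, 12, 13, 28, 42, tzinfo=timezone.utc)       # "current time"
not_after = datetime(2024, 12, 26, 0, 46, 2, tzinfo=timezone.utc)  # cert expiry
print(now - not_after)  # 441 days, 12:42:40

So every status patch is being rejected by a webhook whose serving certificate lapsed more than fourteen months earlier, which is consistent with the CrashLoopBackOff and ContainerCreating states reported for the affected pods.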
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.374409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.407088 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.437047 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.470069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.486055 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.489665 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.489955 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.489908115 +0000 UTC m=+556.813294524 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.516531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d
59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.539592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.572513 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.607701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.629468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.651711 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.674085 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.695468 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.695760 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.695936 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.695965 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.695931348 +0000 UTC m=+557.019317677 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.696060 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696174 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.696274 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.696345 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696508 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696512 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.696488036 +0000 UTC m=+557.019874045 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.696517 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696658 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696663 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.69662566 +0000 UTC m=+557.020012169 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.696749 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696794 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.696755 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.696726603 +0000 UTC m=+557.020112912 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697071 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697195 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697263 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.69723021 +0000 UTC m=+557.020616699 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697312 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697350 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697373 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.697352344 +0000 UTC m=+557.020738663 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697408 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.697393855 +0000 UTC m=+557.020780214 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697457 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697490 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697515 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697568 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697583 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.69755397 +0000 UTC m=+557.020940489 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697654 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697656 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697710 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.697694894 +0000 UTC m=+557.021081123 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.697754 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.697945 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.698017 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698062 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698032204 +0000 UTC m=+557.021418814 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698179 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698252 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698235471 +0000 UTC m=+557.021621650 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.698271 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698322 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698373 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698358656 +0000 UTC m=+557.021744954 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698384 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.698432 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698457 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698488 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698458669 +0000 UTC m=+557.021845168 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698511 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698569 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698550002 +0000 UTC m=+557.021936221 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698604 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698589373 +0000 UTC m=+557.021975662 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698651 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698662 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698717 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698698616 +0000 UTC m=+557.022085006 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.698570 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.698759 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.698744928 +0000 UTC m=+557.022131247 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.698967 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699041 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699097 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699217 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699281 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699384 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699465 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699592 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.699925 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700027 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700082 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700381 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700513 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700655 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700735 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.700985 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701046 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701076 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701243 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701306 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701328 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701357 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701430 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.701401243 +0000 UTC m=+557.024787612 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701479 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701543 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.701524257 +0000 UTC m=+557.024910636 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701555 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701603 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701645 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701663 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.70164324 +0000 UTC m=+557.025029629 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701735 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701771 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.701947 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.701964 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.70194066 +0000 UTC m=+557.025327039 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702042 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702103 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702088184 +0000 UTC m=+557.025474563 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702180 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702202 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702266 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702246237 +0000 UTC m=+557.025632576 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702344 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702303701 +0000 UTC m=+557.025690240 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702355 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702376 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702419 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702404392 +0000 UTC m=+557.025790711 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702340 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702447 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702473 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702457 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702432865 +0000 UTC m=+557.025819274 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702540 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702577 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702549828 +0000 UTC m=+557.025936427 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702611 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702597051 +0000 UTC m=+557.025983300 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702646 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702629922 +0000 UTC m=+557.026016231 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702677 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702662533 +0000 UTC m=+557.026048812 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702645 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702710 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702736 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702722425 +0000 UTC m=+557.026108824 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702739 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702777 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702756716 +0000 UTC m=+557.026143135 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.702979 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.702804207 +0000 UTC m=+557.026190686 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703026 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.703009944 +0000 UTC m=+557.026396323 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703049 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703106 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703233 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.70319436 +0000 UTC m=+557.026580939 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703296 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.703273302 +0000 UTC m=+557.026659691 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703301 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703362 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.703345544 +0000 UTC m=+557.026731843 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703437 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703449 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703501 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.703486579 +0000 UTC m=+557.026872888 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703541 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.70351488 +0000 UTC m=+557.026901349 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703578 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703659 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.703642774 +0000 UTC m=+557.027029303 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703682 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.703776 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.703747117 +0000 UTC m=+557.027133576 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.704067 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.704304 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704375 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704410 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704481 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.70445321 +0000 UTC m=+557.027839829 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704527 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.704506671 +0000 UTC m=+557.027893100 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704568 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704627 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.704608975 +0000 UTC m=+557.027995264 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.704694 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.704961 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.704991 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705045 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.705025116 +0000 UTC m=+557.028411455 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705220 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705362 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.705336804 +0000 UTC m=+557.028723173 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.705480 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.705544 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.705601 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705662 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.705699 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.705755 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705772 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.705744281 +0000 UTC m=+557.029130780 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705781 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705964 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.705973 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.705991 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706016 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706001546 +0000 UTC m=+557.029387465 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706060 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706030006 +0000 UTC m=+557.029416325 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706063 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706165 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.706200 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706238 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706204 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.706290 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706293 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706319 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706278968 +0000 UTC m=+557.029665437 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706374 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706353916 +0000 UTC m=+557.029740175 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706409 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706394397 +0000 UTC m=+557.029780576 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706440 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706424638 +0000 UTC m=+557.029810817 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706466 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.706495 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706501 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706528 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.706579 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.706640 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706687 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706671126 +0000 UTC m=+557.030057365 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706692 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706712 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706756 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.706737589 +0000 UTC m=+557.030123988 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706784 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706964 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.707005 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.706790 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.70677426 +0000 UTC m=+557.030160599 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.707290 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.707382 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.707396 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.707362412 +0000 UTC m=+557.030748952 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.707463 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.707443941 +0000 UTC m=+557.030830390 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.707530 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.707583 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.707662 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.707747 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.707994 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.707960241 +0000 UTC m=+557.031346840 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.707767 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708046 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708369 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708064 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708373 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.708333229 +0000 UTC m=+557.031720378 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.708468 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708500 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.708482253 +0000 UTC m=+557.031868502 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708406 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708535 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.708581 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708687 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708708 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708742 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708763 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.708594 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.708576266 +0000 UTC m=+557.031962555 (durationBeforeRetry 1m4s).
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.710235 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.710373 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.710635 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.710611163 +0000 UTC m=+557.033997242 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.710670 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.710659074 +0000 UTC m=+557.034045083 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.710733 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.710972 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.711320 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.711043594 +0000 UTC m=+557.034430013 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.711541 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.711496304 +0000 UTC m=+557.034883033 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.721013 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.743485 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for 
cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.766453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.789393 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.812021 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.812325 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812401 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 
13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.812443 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812461 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812486 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.812508 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.812593 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812634 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.812700 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812744 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.812712851 +0000 UTC m=+557.136099190 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812943 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.812996 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.812927106 +0000 UTC m=+557.136313015 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813034 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813051 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813078 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.813145 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813197 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.813107893 +0000 UTC m=+557.136494212 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813264 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.813224356 +0000 UTC m=+557.136610725 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813292 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813360 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.813336919 +0000 UTC m=+557.136723198 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.813435 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.813534 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.813720 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813754 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813984 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.813894 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.813783673 +0000 UTC m=+557.137169962 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.814088 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.814068892 +0000 UTC m=+557.137455171 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.814415 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.814392853 +0000 UTC m=+557.137779192 (durationBeforeRetry 1m4s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.814479 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.814672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.814655822 +0000 UTC m=+557.138041991 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.814693 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.814705 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. 
No retries permitted until 2026-03-12 13:29:46.814690983 +0000 UTC m=+557.138077162 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.814961 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.814934219 +0000 UTC m=+557.138320528 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.815606 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.816040 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816251 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816292 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816318 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.816333 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.816483 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816500 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.81647404 +0000 UTC m=+557.139860449 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.816571 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816604 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816628 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816687 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.816669524 +0000 UTC m=+557.140055843 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.816734 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816776 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816805 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816966 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.816979 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817043 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.817019388 +0000 UTC m=+557.140405937 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817088 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.81707191 +0000 UTC m=+557.140458169 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817203 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817327 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.817306324 +0000 UTC m=+557.140692703 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817417 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817473 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.817458709 +0000 UTC m=+557.140845058 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.816785 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817546 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.817646 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817672 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.817724 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817741 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.817705387 +0000 UTC m=+557.141091696 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.817797 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.817780499 +0000 UTC m=+557.141166678 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818063 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818085 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818099 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818195 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818220 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818269 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.818249747 +0000 UTC m=+557.141636076 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818329 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818351 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818393 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.818375969 +0000 UTC m=+557.141762288 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818440 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818463 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818484 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818515 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818539 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.818522175 +0000 UTC m=+557.141908504 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818586 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818636 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.818621236 +0000 UTC m=+557.142007455 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818646 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818592 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818727 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818733 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.818706802 +0000 UTC m=+557.142093121 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818751 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818770 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818794 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.818807 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819094 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.818993 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819191 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819025 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.818995888 +0000 UTC m=+557.142382307 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819218 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819230 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819257 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.819233898 +0000 UTC m=+557.142620177 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819273 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819291 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.819271129 +0000 UTC m=+557.142657528 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.819349 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819359 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.819330108 +0000 UTC m=+557.142716557 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.819417 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.819480 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819505 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819541 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819559 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819612 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.819596547 +0000 UTC m=+557.142982876 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.819663 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819718 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819746 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819765 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.819911 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.819925 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.819804237 +0000 UTC m=+557.143190536 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.820041 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.820105 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820212 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820370 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820401 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820420 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820466 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820484 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.820463138 +0000 UTC m=+557.143849547 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820494 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.820371 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820581 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820513 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.820645 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.82062795 +0000 UTC m=+557.144014119 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.820743 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.820938 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.821016 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821034 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821067 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.821077 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821190 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.821109915 +0000 UTC m=+557.144496234 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821315 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821343 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821361 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821366 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821402 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821415 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.821400805 +0000 UTC m=+557.144787134 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821422 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821456 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.821441946 +0000 UTC m=+557.144828215 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821487 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.821472867 +0000 UTC m=+557.144859154 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821559 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821597 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.821574573 +0000 UTC m=+557.144961137 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.821640 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.821624815 +0000 UTC m=+557.145011064 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.823092 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.823195 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.823220 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.823586 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.823286778 +0000 UTC m=+557.146673157 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.855027 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.883419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.887182 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.887316 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.922477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.923245 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.923776 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.924686 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925168 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925336 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925393 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925608 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925667 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.925686 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925722 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.925753 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.925772 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.925995 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926011 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926032 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926082 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926170 4125 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.926089254 +0000 UTC m=+557.249475663 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926222 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.926200927 +0000 UTC m=+557.249587256 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926271 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926298 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.926258369 +0000 UTC m=+557.249644728 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.925782 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926353 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926359 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.926337061 +0000 UTC m=+557.249723460 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926382 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926407 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926454 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926527 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926555 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926572 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.926577 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926590 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926625 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926644 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926627 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. 
No retries permitted until 2026-03-12 13:29:46.926609761 +0000 UTC m=+557.249996130 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926691 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926750 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.926731085 +0000 UTC m=+557.250117394 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.925789 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926759 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926973 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.926953891 +0000 UTC m=+557.250340311 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927020 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.927002943 +0000 UTC m=+557.250389202 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927058 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.927043604 +0000 UTC m=+557.250429923 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927092 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.927078255 +0000 UTC m=+557.250464504 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927097 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.926979 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927187 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927199 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927213 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.926940 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") 
pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.927272 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.927254981 +0000 UTC m=+557.250641570 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.927452 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.927519 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.927597 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.927681 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.927731 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.927965 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:42 crc 
kubenswrapper[4125]: I0312 13:28:42.928076 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928241 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928307 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928411 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928486 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928594 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928690 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928741 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") 
pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.928982 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.929048 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.929209 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929423 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929465 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929485 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929530 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929596 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929624 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929636 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929663 4125 projected.go:294] Couldn't get configMap 
openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929680 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929780 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929786 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929922 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929952 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930010 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930081 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930110 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930277 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930312 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930330 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930348 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930378 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930398 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object 
"openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930451 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930477 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930496 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930510 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930523 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929547 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930534 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930551 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930561 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.929555 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.929535044 +0000 UTC m=+557.252921393 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930575 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930280 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930623 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930597398 +0000 UTC m=+557.253983747 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930671 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930642448 +0000 UTC m=+557.254028767 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930704 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930712 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930696109 +0000 UTC m=+557.254082288 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930730 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930749 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.93073264 +0000 UTC m=+557.254118939 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930756 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930774 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930780 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930765741 +0000 UTC m=+557.254151930 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930915 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930797412 +0000 UTC m=+557.254183601 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.930956 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930940497 +0000 UTC m=+557.254326676 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931005 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.930990598 +0000 UTC m=+557.254376857 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931035 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931022179 +0000 UTC m=+557.254408458 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931186 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931091392 +0000 UTC m=+557.254477561 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931238 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931217815 +0000 UTC m=+557.254604075 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931283 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931267169 +0000 UTC m=+557.254653716 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931317 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931302128 +0000 UTC m=+557.254688757 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931353 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931337399 +0000 UTC m=+557.254723678 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931478 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.93137178 +0000 UTC m=+557.254758069 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931569 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931544246 +0000 UTC m=+557.254930415 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931611 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.931595597 +0000 UTC m=+557.254981766 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:28:42 crc kubenswrapper[4125]: E0312 13:28:42.931687 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:46.93166787 +0000 UTC m=+557.255054039 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.968494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:42 crc kubenswrapper[4125]: I0312 13:28:42.995417 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.027060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028211 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.027229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.027586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.027944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e
9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.029089 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.029172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.029215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.029226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.029317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.029354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.029384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028510 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.029418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.028490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.030048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.030156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.030274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030334 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.030386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.030655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.030761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.030915 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.031067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.031080 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.031180 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.031212 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.031253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.031212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.031533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.031579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.031668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.032027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.032565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.032682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033272 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:29:47.033252673 +0000 UTC m=+557.356638352 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.033316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.033379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.033739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.034326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034339 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.034423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034486 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034582 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.034617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.034742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.034884 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.034974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.035016 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035096 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035138 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035156 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035244 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035257 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.035298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035267 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035355 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:29:47.0353419 +0000 UTC m=+557.358727679 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035440 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:29:47.03540812 +0000 UTC m=+557.358794459 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.035492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.035884 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
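[editor's note] The "No retries permitted until ... (durationBeforeRetry 1m4s)" lines show the volume manager's per-operation exponential backoff: each failed MountVolume.SetUp doubles the delay before the next attempt, which is how the retry window has grown to 64s by this point in the log. A minimal sketch of that doubling schedule; the starting delay and cap below are illustrative assumptions, not the kubelet's exact constants:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := time.Second             // assumed initial delay
	const maxDelay = 2 * time.Minute // assumed cap
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: next retry in %v\n", attempt, delay)
		delay *= 2 // 1s, 2s, 4s, ... 1m4s: consistent with durationBeforeRetry above
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}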
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.036091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.036252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.036387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.036566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.037108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.037194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.037336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.037592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.037660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.037704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.038029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.038742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.038908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.038996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.039064 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.039280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:43 crc kubenswrapper[4125]: E0312 13:28:43.039410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.060652 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.088338 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.112450 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.142328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
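[editor's note] Every status patch in this stretch fails the same way: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 serves a certificate that expired 2024-12-26, while the node clock reads 2026-03-12, so the apiserver's TLS verification rejects the call. A minimal sketch for inspecting the serving certificate's validity window directly, assuming the endpoint from the log; InsecureSkipVerify is deliberate so the handshake completes even with the expired cert:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Webhook endpoint taken from the log lines above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("NotBefore=%v NotAfter=%v expiredNow=%v\n",
		cert.NotBefore, cert.NotAfter, time.Now().After(cert.NotAfter))
}

Until that certificate is rotated, every webhook-gated pod status update on this node will keep failing with the same x509 error.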
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.169923 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.194421 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.225031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.244242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.266075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.286012 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.310556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.335098 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.366583 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.390719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.411192 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.432472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
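[editor's note] The payloads in these "Failed to update status for pod" entries are strategic-merge patches against the pod status subresource; $setElementOrder/conditions is the merge directive that pins the ordering of the conditions list while individual conditions are merged by their "type" key. A minimal client-go sketch issuing the same kind of patch, assuming in-cluster credentials and reusing a pod name from the log purely for illustration:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Strategic-merge patch: conditions merge by the "type" key.
	patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"False","reason":"ContainersNotReady"}]}}`)
	// Pod name/namespace reused from the log for illustration only.
	_, err = cs.CoreV1().Pods("openshift-multus").Patch(context.TODO(),
		"multus-q88th", types.StrategicMergePatchType, patch,
		metav1.PatchOptions{}, "status")
	fmt.Println("patch result:", err) // in the log this fails at the admission webhook
}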
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.468399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.502690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.526677 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.551289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.578340 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.609191 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.630080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.647219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.667465 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.684898 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.700721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.717158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.737720 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.759313 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.779297 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.829008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.848587 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.867358 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.883630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.885777 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:43 crc kubenswrapper[4125]: I0312 13:28:43.885929 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:44 crc kubenswrapper[4125]: I0312 13:28:44.025052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:44 crc kubenswrapper[4125]: I0312 13:28:44.025548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:44 crc kubenswrapper[4125]: E0312 13:28:44.025503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:44 crc kubenswrapper[4125]: E0312 13:28:44.026077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:44 crc kubenswrapper[4125]: I0312 13:28:44.027435 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:28:44 crc kubenswrapper[4125]: E0312 13:28:44.027843 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:28:44 crc kubenswrapper[4125]: I0312 13:28:44.889501 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:44 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:44 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:44 crc kubenswrapper[4125]: I0312 13:28:44.889759 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.026053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.026224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027096 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027431 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.027542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028022 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.028054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.028331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.027303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.028516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028553 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.028710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.028731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.029225 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.029447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.029646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.029736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.029998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030080 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.030141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.030233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.030465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.030633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.030722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.030867 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.031531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.031594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.031663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.031683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.031986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.032278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.032532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.032654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.033209 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.033357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.033655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033779 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033894 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.033949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.034276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.034309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.034392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.034584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.034893 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.035202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.035655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.036799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.037056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.037203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.037616 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.038510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.038606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.038757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.038885 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.039020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.039219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:45 crc kubenswrapper[4125]: E0312 13:28:45.040106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.895990 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:45 crc kubenswrapper[4125]: I0312 13:28:45.896101 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.025667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:46 crc kubenswrapper[4125]: E0312 13:28:46.026614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.026029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:46 crc kubenswrapper[4125]: E0312 13:28:46.027635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.135948 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/0.log" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.136031 4125 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630" exitCode=1 Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.136067 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630"} Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.136577 4125 scope.go:117] "RemoveContainer" containerID="ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.168687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.200569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.227952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.255404 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.283299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.306945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.326559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.356546 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.394190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.450679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.471282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.484326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.499215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.513682 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.528630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.542390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.556219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.568672 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.582391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.595265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.609043 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.623199 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:46 crc kubenswrapper[4125]: I0312 13:28:46.640350 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.011863 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.012466 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.014177 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.014385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.015600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.018361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019066 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.018562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019865 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.019934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.019945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020681 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020703 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.020950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.020998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.021220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.021341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.021601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.021664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.021733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022563 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.022913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.022924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023064 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.023291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.023777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.024361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.024801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:48 crc kubenswrapper[4125]: E0312 13:28:48.025082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.027036 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/0.log" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.030359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.033645 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f"} Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.050010 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.065928 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.080055 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.094049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.117722 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.139087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.159190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.182700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.205761 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.225979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.245937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.885585 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:48 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:48 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:48 crc kubenswrapper[4125]: I0312 13:28:48.885680 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.037106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:28:49 crc kubenswrapper[4125]: E0312 13:28:49.037276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.076372 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.111051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.138473 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.171797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.224579 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerI
D\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.255676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.286109 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.310527 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.331670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.360700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.398678 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.432027 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.453620 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.477200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.497773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.525379 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.548893 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.576071 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.603080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.627507 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.667011 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.702533 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.724630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.751288 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.770623 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.802186 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.827693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.852395 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.873508 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.888986 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.889089 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.913686 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:49 crc kubenswrapper[4125]: I0312 13:28:49.936092 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:49Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.030658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.031454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.031775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.032268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.033494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.033734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.034556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.034688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.035109 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.035592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.035899 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.036199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.036397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.036574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.036532 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.038449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.038691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.038787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.039058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.039204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.039255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.039332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.039600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.039700 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.039789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.039942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.040388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.040484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040581 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.040653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040865 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040902 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.040957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.040970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041098 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.041277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041368 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.041464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041510 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041869 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041933 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.041946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.041978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042061 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042652 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.042707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.042776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.043028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.043292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.043464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.043551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.044639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.044698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.044908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.045987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.046077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.046246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.047377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.047670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.072694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.112416 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.135021 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.163221 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.179565 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.209980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.229861 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.244376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.264212 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.285682 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.309787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.332349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.355544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.390470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.411700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.435378 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.435487 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.435502 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.435565 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.435599 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:50Z","lastTransitionTime":"2026-03-12T13:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.439601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.462049 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.470050 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.470290 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.470309 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.470331 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.470363 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:50Z","lastTransitionTime":"2026-03-12T13:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.490267 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.497228 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.497374 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.497414 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.497453 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.497481 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.497517 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:50Z","lastTransitionTime":"2026-03-12T13:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.526520 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.532420 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.532454 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.532466 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.532495 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.532523 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:50Z","lastTransitionTime":"2026-03-12T13:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.540389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.562341 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.576242 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.576523 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.576614 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.576730 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.576911 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:28:50Z","lastTransitionTime":"2026-03-12T13:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.579202 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.601627 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: E0312 13:28:50.602000 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.608724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: 
watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.637321 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.659087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.696191 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.716389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.735664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.757877 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.799984 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.819762 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.850556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.872897 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.886184 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:50 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:50 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.886797 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.904223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.935574 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.957425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:50 crc kubenswrapper[4125]: I0312 13:28:50.980492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.009222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.026037 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:51 crc kubenswrapper[4125]: E0312 13:28:51.026535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.043424 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.081444 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.107072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.127576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.150194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.173075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.193669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.209793 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.231489 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.253247 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.272893 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.291963 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.321320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.355665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.397088 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.431465 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.448948 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.467625 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.486606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.508152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.526295 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.540775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.559525 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.577719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.594867 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.608937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.623184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.637583 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.658642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.673599 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.693589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.714760 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.889213 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:51 crc kubenswrapper[4125]: I0312 13:28:51.889421 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 
13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.024881 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.024918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.025024 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.025335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.025760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.026045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.026281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.026386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.026528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.026664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.026879 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.026985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.027167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.027273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.027421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.027564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.027746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.028281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.027895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029946 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.030243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.030645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029621 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.029901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.031447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.031454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.031721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.031944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.032183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.032381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.032432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.032484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.032515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.032565 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.032655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033452 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.033579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.033683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.033980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.034291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.034516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.034543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.034574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.034941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.035186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.036353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.036378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.036478 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.036675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.036775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.036999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.037938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.038920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:52 crc kubenswrapper[4125]: E0312 13:28:52.041026 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.057619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.078718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.102278 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.123036 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.140764 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.158049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.178515 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.195067 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.215512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.240360 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.256929 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.279986 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.306183 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.327654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.359907 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.386044 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.405459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.424193 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.442171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.457669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.477073 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.504092 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.526797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.546747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.561920 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.590299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.614486 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.636425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCoun
t\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.661031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.680437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.694549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.709090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.722502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.743205 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.759485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.776304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.796275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller 
pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.815290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.832502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.847524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.874668 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},
{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",
\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.885775 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.885918 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.895559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.915200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.942485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},
{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.961399 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.978280 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:52 crc kubenswrapper[4125]: I0312 13:28:52.995346 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.014533 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: E0312 13:28:53.015284 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.024913 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:53 crc kubenswrapper[4125]: E0312 13:28:53.025503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.041584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.067261 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/0.log" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.068549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.071561 4125 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf" exitCode=1 Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.071614 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf"} Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.075172 4125 scope.go:117] "RemoveContainer" containerID="c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.092960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.114369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.135773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.174562 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.191687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.208707 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.253014 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.290216 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.331553 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.376362 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.426118 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.466475 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.505622 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.541549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.582739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.622575 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.670282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.712271 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.740961 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.780034 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.817245 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.857308 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.889015 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:53 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:53 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.889165 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.903215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.932924 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:53 crc kubenswrapper[4125]: I0312 13:28:53.975190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.011883 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.046663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046911 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.046943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.046417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047055 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047116 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.047776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.047951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.048893 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.048955 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.049024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.049097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.049197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.049246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.049277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.049388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.049442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.049490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.049538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.049882 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.050047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.050181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.050239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.050307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.050435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.050496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.050607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.050701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.050871 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.050976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.051086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051498 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.051979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052907 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.052973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:28:54 crc kubenswrapper[4125]: E0312 13:28:54.053030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.072052 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.080908 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/0.log"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.090753 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1"}
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.091727 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.112189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.138035 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.174734 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.211059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.261469 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.293175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.333484 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.370030 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.427431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.454319 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.494085 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.537550 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.571454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.613563 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.660637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.693039 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.736023 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.770485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.816001 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.849007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.888039 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.888226 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.890987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.931747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:54 crc kubenswrapper[4125]: I0312 13:28:54.970267 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.012925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.025431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:55 crc kubenswrapper[4125]: E0312 13:28:55.025757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.050033 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.093259 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.153101 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.185461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.225273 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.273626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.309670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.333055 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.376048 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.426606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.462041 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.502528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.555187 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:52Z\\\",\\\"message\\\":\\\"3:28:52.862007 14996 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:52.862011 14996 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:52.862016 14996 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:52.862026 14996 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:52.862034 14996 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:28:52.862114 14996 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:28:52.862305 14996 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:52.862315 14996 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:52.862321 14996 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:52.862329 14996 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:28:52.862336 14996 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:52.862345 14996 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:52.862352 14996 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:52.862394 14996 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from 
github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.589376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.620046 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.650276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.697472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.740636 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.787961 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.834651 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.865075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z"
Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.888112 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:28:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:28:55 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:28:55 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.888341 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.902060 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.941535 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:55 crc kubenswrapper[4125]: I0312 13:28:55.976556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.024979 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.025064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.025098 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.025313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.026564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.026799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.027195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.027210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.027657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.027918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.028112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.028303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.028440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.026691 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.028599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.028750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.028940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.029086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.029259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.029440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.029599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.029676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.029913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.030018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.030210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.030407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.030541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.030678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.028254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.030943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.031053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.031242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.031314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.031406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.031519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.031665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.032297 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.032649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.033214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.033315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.033431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.033548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.033695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.034085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.034086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.034296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.034343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.034443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.034507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.034596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.034731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.034797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.035013 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.035255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.035327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.035428 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.035539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.035688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.035931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036511 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036537 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.036659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.036751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.036970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.037937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.037966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.038110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.038281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.038291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.038438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.038598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.038770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.038948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.039051 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.039253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.039424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.039471 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.039576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.043533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.043747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.044011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.044230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.044385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.044558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.045197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.045221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.045314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.045431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.045692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.045949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.067216 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.095747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.104782 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/1.log" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.106532 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/0.log" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.115548 4125 generic.go:334] "Generic (PLEG): container finished" 
podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1" exitCode=1 Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.115652 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1"} Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.115718 4125 scope.go:117] "RemoveContainer" containerID="c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.119369 4125 scope.go:117] "RemoveContainer" containerID="269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1" Mar 12 13:28:56 crc kubenswrapper[4125]: E0312 13:28:56.120509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.147550 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\"
:0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.175055 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.219962 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.261068 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.293704 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.342913 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.379949 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.384508 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.423639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.455369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.503931 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.534085 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.574953 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.622685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.658727 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.692937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.732003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.772650 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.816506 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.853009 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.886991 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:56 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:56 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.887623 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.891219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.930511 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:56 crc kubenswrapper[4125]: I0312 13:28:56.982558 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.019636 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.025216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:28:57 crc kubenswrapper[4125]: E0312 13:28:57.026482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.069399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.094900 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.126115 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/1.log" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.147947 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.172555 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.218323 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.258587 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.311987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.343608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.385289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.420704 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.460952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.499004 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.545656 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.578437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.627787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.669314 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.699798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.737692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.783388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.825520 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.858633 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.889612 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:28:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:28:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:28:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.889767 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.896093 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.942551 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:57 crc kubenswrapper[4125]: I0312 13:28:57.983130 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:52Z\\\",\\\"message\\\":\\\"3:28:52.862007 14996 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:52.862011 14996 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:52.862016 14996 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:52.862026 14996 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:52.862034 14996 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:28:52.862114 14996 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:28:52.862305 14996 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:52.862315 14996 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:52.862321 14996 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:52.862329 14996 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:28:52.862336 14996 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:52.862345 14996 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:52.862352 14996 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:52.862394 14996 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 
7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.018025 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
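
[Annotation, not part of the captured log.] The entries above and below show two distinct root causes: every status patch is rejected because the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a certificate that expired 2024-12-26T00:46:02Z while the node clock reads 2026-03-12, and pod sandboxes cannot start because /etc/kubernetes/cni/net.d contains no CNI configuration. A minimal, hypothetical Go diagnostic is sketched here; it is not kubelet code, only a reproduction of the same two checks using the standard library (crypto/x509 validity window, directory listing of the CNI conf dir named in the log). The certificate path is an argument because the log does not name one.

// certcheck.go - hypothetical diagnostic sketch, assuming a PEM cert file.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: certcheck <cert.pem>")
		os.Exit(1)
	}
	data, err := os.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	// Same validity-window test that yields the log's
	// "certificate has expired or is not yet valid".
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("EXPIRED: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("NOT YET VALID: current time %s is before %s\n",
			now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}

	// The "Container runtime network not ready" entries point at an empty
	// CNI conf dir; this path is the one named in the log itself.
	entries, err := os.ReadDir("/etc/kubernetes/cni/net.d")
	if err != nil || len(entries) == 0 {
		fmt.Println("no CNI configuration files found in /etc/kubernetes/cni/net.d")
		return
	}
	for _, e := range entries {
		fmt.Println("CNI config:", e.Name())
	}
}

Run against the webhook's serving certificate (wherever it is installed on this host) it would print an EXPIRED line matching the timestamps quoted in the entries above; until that certificate is rotated and a CNI config appears, the patch failures and "No sandbox for pod can be found" retries below simply repeat. [End annotation; the captured log resumes.]
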
Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.023472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:28:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.028100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.028361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.028552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.028648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.028776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.028924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.029060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.029192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.029323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.029401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.029528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.029602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.029711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.029789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.029973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.030054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.030199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.030282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.030406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.030492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.030606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.030677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.030777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.030927 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.031060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.031167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.031296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.031369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.031479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.031574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.031675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.031748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.031925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.032005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.032113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.032236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.032354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.032434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.032594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.032682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.032790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.032925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.033053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.033168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.033297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.033373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.033495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.033574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.033694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.033765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.033911 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.033994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034759 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.034934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.034979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.035083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.035238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.035350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.035457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.035579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.035698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.035776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.036038 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.036284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.036405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.036701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.036872 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.037185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.037438 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.037667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.037797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.038020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.038217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.038445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:28:58 crc kubenswrapper[4125]: I0312 13:28:58.038569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:28:58 crc kubenswrapper[4125]: E0312 13:28:58.038794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.221973 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.222528 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.225274 4125 kubelet.go:2517] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="3.2s" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.226163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.226202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.226344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.226716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227034 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.227215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.226168 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.227627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.227895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.227959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228104 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.228297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228697 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.228732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.228791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.228949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.229008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.229049 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.229166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.229231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.229313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.229400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.234171 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.234458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.234552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.234668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.234743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.235673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.235755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.235980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236066 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236561 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.236775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.240098 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.240216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.240499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.240918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.244233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.244657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.245575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.245798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.246166 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.246659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.246759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.246775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.247391 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.249805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257372 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.257748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259225 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.259657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.263195 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.263250 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.263266 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.263304 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.263469 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:01Z","lastTransitionTime":"2026-03-12T13:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.487262 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.488709 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.497517 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.497565 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.497579 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.497615 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.497636 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:01Z","lastTransitionTime":"2026-03-12T13:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.515510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.533864 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.542014 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.542108 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.542128 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.542179 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.542211 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:01Z","lastTransitionTime":"2026-03-12T13:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.567916 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.569113 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.579288 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.579387 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.579404 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.579425 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.579454 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:01Z","lastTransitionTime":"2026-03-12T13:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.608989 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... image list identical to the 13:29:01.569113 attempt above; elided ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.615451 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.615521 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.615549 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.615569 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.615615 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:01Z","lastTransitionTime":"2026-03-12T13:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.622244 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from 
succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.657478 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... image list identical to the 13:29:01.569113 attempt above; elided ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: E0312 13:29:01.657566 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.674059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\
":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.694621 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.712456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.728102 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.742697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.757031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.777180 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.792791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.816870 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.831183 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.848427 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.868281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.885012 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.893216 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.893281 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.909883 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.938116 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.965199 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:01 crc kubenswrapper[4125]: I0312 13:29:01.986998 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.007392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.024559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.057794 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.085406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.125728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.144736 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.164353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(2eb2b200bca0d10cf0fe16fb7c0caf80)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.191860 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.223689 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.232004 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/6.log" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.234099 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"2eb2b200bca0d10cf0fe16fb7c0caf80","Type":"ContainerStarted","Data":"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"} Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.255082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:52Z\\\",\\\"message\\\":\\\"3:28:52.862007 14996 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:52.862011 14996 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:52.862016 14996 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:52.862026 14996 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:52.862034 14996 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:28:52.862114 14996 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:28:52.862305 14996 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:52.862315 14996 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:52.862321 14996 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:52.862329 14996 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:28:52.862336 14996 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:52.862345 14996 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:52.862352 14996 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:52.862394 14996 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 
7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.273554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.298646 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.315349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.333504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.352022 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.372896 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.400200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.420518 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.443775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.476383 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.498668 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.518381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.547289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.567344 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.592913 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.610225 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.630299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc 
kubenswrapper[4125]: I0312 13:29:02.663299 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.683247 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.706044 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.733932 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.757621 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.788075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.824747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.850010 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.884635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.888996 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.889299 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.914749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.953100 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:02 crc kubenswrapper[4125]: I0312 13:29:02.983421 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.009266 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.020512 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.025968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026030 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.026377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.026998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027027 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.027019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027088 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027238 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.027268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.027504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.027518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.027703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.028015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.028287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.028364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.028527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.028598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.028990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.029007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.029215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.029345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.029519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.029571 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.029676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.029734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.029927 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.029986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.030196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.030228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.030339 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.030492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.030495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.030534 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.030695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.030942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.031076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.031214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.031289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.031352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.031465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.031692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.032454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.032536 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.032554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.032477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.032730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.032434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.032645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.032797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.032981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.033083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.033229 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.033357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.033427 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.033523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.033556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.033651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.033757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.034017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.034102 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.034358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.034495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.034633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.034690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.034941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.035044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.035296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.035446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.035527 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.035651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.035797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.035974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.036133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.036340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.036473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.036620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.036739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.037006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.037222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.037356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.037499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:03 crc kubenswrapper[4125]: E0312 13:29:03.037632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.049957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.072793 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.107334 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.136428 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.167208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.197027 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.221684 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.246947 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.272720 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.298795 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.320492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.348474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.380016 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.417297 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.442990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.465289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.485872 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.504939 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.529208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.548277 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.569248 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.589561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.605518 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.624960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.650075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.670883 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.694191 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.717059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.746522 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.774262 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.797413 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.820329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.846756 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.870428 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.887505 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.887658 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.897200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:03 crc kubenswrapper[4125]: I0312 13:29:03.981608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.004208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.034430 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.063582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.089713 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.120288 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.146667 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.181304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.211716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.237468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.267041 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.292537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.317695 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.342328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.367779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.400723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.425083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.442783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.462218 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.487690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.511123 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.534063 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.571612 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.597952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.630328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.658220 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.689297 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":7,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.718446 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.746545 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.794042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2bf7f2e935ce79be374b1715f26040ead4a59cc173cdeea0c1a23d8b100eacf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:52Z\\\",\\\"message\\\":\\\"3:28:52.862007 14996 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:52.862011 14996 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:52.862016 14996 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:52.862026 14996 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:52.862034 14996 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:28:52.862114 14996 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:28:52.862305 14996 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:52.862315 14996 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:52.862321 14996 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:52.862329 14996 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:28:52.862336 14996 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:52.862345 14996 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:52.862352 14996 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:52.862394 14996 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 
7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.822975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.847131 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.875347 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.888680 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.888981 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.903634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:04 crc kubenswrapper[4125]: I0312 13:29:04.985584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.019719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.025479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025363 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.025778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026090 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.026095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.026364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026415 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.026516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.026689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026884 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.027059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.027071 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.027064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.026502 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.027350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.027365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.027558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.027970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028023 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.027991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028722 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.028957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.029084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.029218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.029287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.029571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.029761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.030999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.031233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.031425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.031627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.031791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.032197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.032351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.032429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.032529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.032711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.032973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.033335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.033500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.033698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.033958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.034132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.034989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.035245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.035435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.035633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.035803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.036071 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.036310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.036409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.036524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.036656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.037017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.037245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.037414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.037570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.037727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:05 crc kubenswrapper[4125]: E0312 13:29:05.038217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.068437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.099117 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.127654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.155785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.187530 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.207988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.235779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.260322 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.288026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.311545 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.337694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.369120 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.388326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.409439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.426578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.445378 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.465538 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.492318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.532442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.572367 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.616317 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.656530 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.697408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.740663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.782196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.807685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.850617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.887073 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:05 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:05 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:05 crc kubenswrapper[4125]: I0312 13:29:05.887335 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:06 crc kubenswrapper[4125]: I0312 13:29:06.888682 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:06 crc kubenswrapper[4125]: I0312 13:29:06.888978 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026389 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026905 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.027021 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.027027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.026977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.027028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.027296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.027379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.027544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.027561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.027682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.027958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.027990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.028241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.028285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.028429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.028474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.028612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.028670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.028741 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.028738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.028935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.029104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.029105 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.029306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.029438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.029555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.029701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.029930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.030120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.030319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.030390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.030481 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.030552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.030602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.030654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.030944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.031065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.031100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.031289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.031369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.031486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.031549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.031630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.031743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.031763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.031915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.032352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.032361 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.032521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.032644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.032764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.032962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.033056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.033069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.033229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.033298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.033428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.033494 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.033627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.033770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.034026 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.034274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.034299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.034457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.034494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.034593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.034770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.035042 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.035087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.035347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.035386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.035595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.035593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.035732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.035797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.036070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.036308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.036388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.036572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.036636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.036774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.037037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.037217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.037381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.037577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.037727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.037991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.038350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:07 crc kubenswrapper[4125]: E0312 13:29:07.038505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.519915 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.886713 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:07 crc kubenswrapper[4125]: I0312 13:29:07.887004 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:08 crc kubenswrapper[4125]: E0312 13:29:08.022179 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:29:08 crc kubenswrapper[4125]: I0312 13:29:08.887604 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:29:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:29:08 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:29:08 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:29:08 crc kubenswrapper[4125]: I0312 13:29:08.887746 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.024708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.024796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.025381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.024760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025055 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.025537 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.024951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.024717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.026094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.026341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.026700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.026656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.027054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.027131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.027268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.027319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.027665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.027733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.027969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.028040 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.028335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028368 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.028482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.028609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.028721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.028794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.028970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.029078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.029317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.029494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.029738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.029960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.030722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.030797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.031045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.031656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.031778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.031985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.032418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.032537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.032657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.032939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.033007 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.033010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.033301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.033655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.033988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.034224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.034383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.034457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.034712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.034982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.035971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:29:09 crc kubenswrapper[4125]: E0312 13:29:09.036117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.887000 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:09 crc kubenswrapper[4125]: I0312 13:29:09.887666 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.028051 4125 scope.go:117] "RemoveContainer" containerID="269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.062695 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.092097 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.116436 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.176520 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.199737 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.249708 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.279461 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.284751 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.292557 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.299013 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.314089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.328542 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.349153 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.371166 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.388171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.410849 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.427764 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.446724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.462207 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.485564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.506610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:10 crc kubenswrapper[4125]: I0312 13:29:10.524060 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.545645 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.545733 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.546741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.546920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.546971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547189 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547438 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547488 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547888 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.547940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548063 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548129 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.547276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548422 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.548758 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548883 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.548978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.549071 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.549091 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.549205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.549217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.549344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.549406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.549495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.549558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.549581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.549589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.552109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.549928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.550084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.550106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.552689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.552755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.552786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.553049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.550196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.553288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.550571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.550805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.550897 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.551046 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.551179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.551381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.551559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.561558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.564877 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.565405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.566318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.579977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.580212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.593699 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.593759 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.593782 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.593805 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.593878 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:12Z","lastTransitionTime":"2026-03-12T13:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.607159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.612515 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/1.log" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.619316 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827"} Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.621448 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.622292 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.628351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.631164 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.631435 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.631478 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.631501 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.631520 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.631540 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:12Z","lastTransitionTime":"2026-03-12T13:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.649895 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.652026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.655464 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.655534 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.655551 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.655570 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.655590 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:12Z","lastTransitionTime":"2026-03-12T13:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.673624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.677945 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has 
sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8
e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df
5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132
427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.686614 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.686665 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.686681 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.686700 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.686720 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:12Z","lastTransitionTime":"2026-03-12T13:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.694065 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to 
/host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.700356 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.703908 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.703982 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.704000 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.704019 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.704043 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:12Z","lastTransitionTime":"2026-03-12T13:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.709990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.717514 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: E0312 13:29:12.717568 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.725346 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.744789 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.758660 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.777239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.793397 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.810227 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.831492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.851523 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.869124 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.885517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.887937 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:29:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:29:12 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:29:12 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.888415 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.904444 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.921608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.943410 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.963798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:12 crc kubenswrapper[4125]: I0312 13:29:12.985570 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.008799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: E0312 13:29:13.023580 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.026197 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:29:13 crc kubenswrapper[4125]: E0312 13:29:13.026543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.037303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.067300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.094253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.121051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.147492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.169071 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.189594 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.228481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.257499 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.287183 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.312037 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":7,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.335004 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.384477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.406884 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.436957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.466277 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.495773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.528572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.552461 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.579002 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.619384 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.647265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.665390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.690024 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.710788 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.730235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.748054 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.766714 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.782101 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.814935 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.849263 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.873312 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.887355 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.887556 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.892631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.909419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.925936 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.941031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.960159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:13 crc kubenswrapper[4125]: I0312 13:29:13.979294 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.003478 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.020543 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025498 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.025698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.025945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.025989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026704 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026871 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026899 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.026992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.026998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.027072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027103 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.027194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.027266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.027575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.027387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.027783 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.028658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.028918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.029453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.029366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.029969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.030256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.030394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.030517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.030650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.031068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.031223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.031373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.031519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.031645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.031751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.032710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.038735 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.054083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.068687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.085709 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.102067 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.123965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.142718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.162209 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.180353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.197016 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.213512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.230438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.248063 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.270567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.299408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.321209 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.348318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.372045 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.397351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.438701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.465674 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.486795 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.516409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.556235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.585790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.611492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.628436 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/2.log" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.630788 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/1.log" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.639106 4125 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827" exitCode=1 Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.639248 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827"} Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.639318 4125 scope.go:117] "RemoveContainer" containerID="269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.639771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.653572 4125 scope.go:117] "RemoveContainer" containerID="6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827" Mar 12 13:29:14 crc kubenswrapper[4125]: E0312 13:29:14.655351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.665257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.687110 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.714734 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping 
reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.735008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.758713 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.778281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.797176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.818943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.860376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.881340 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.889235 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.889365 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.906597 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.937456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.969702 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:14 crc kubenswrapper[4125]: I0312 13:29:14.994416 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.019973 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.024730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.024935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:15 crc kubenswrapper[4125]: E0312 13:29:15.025119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:15 crc kubenswrapper[4125]: E0312 13:29:15.025443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.036791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.076576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.099202 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.125126 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.166580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.193292 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.215036 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.239312 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.273637 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.300090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.327740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.354054 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.375495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.408003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.438608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.484956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.511026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.532275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.559347 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.595655 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.643696 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.645101 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/2.log" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.683992 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.707903 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.724391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.740029 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.786298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.820243 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.867101 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.886414 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:29:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:29:15 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:29:15 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.887274 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.903051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.943028 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:15 crc kubenswrapper[4125]: I0312 13:29:15.987001 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.024756 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025049 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025082 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.025520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.025800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025932 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.025996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026057 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.026256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.026455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.026651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.026893 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.026946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.027051 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.027124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.027526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.027723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.028005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.028247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.028445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.028630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.028902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.029063 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.029195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.029250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.029358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.029519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.029615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.029757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.029954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.030112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.030259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.030403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.030501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.030644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.030734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.030937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.031032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.031214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.031343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.031606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.031783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.031925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.032037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.032085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.032269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.032328 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.032416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.032465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.032554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.032603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.032692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.032741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.032922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.032981 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.033072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.033122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.033267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.033341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.033445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.034422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.034549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.034679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.034217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.034933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.034990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.035263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.035414 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.035531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.035674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.035760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.036009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.036078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.036196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:29:16 crc kubenswrapper[4125]: E0312 13:29:16.036262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.247016 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.261189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.276181 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.295937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.317268 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.340242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.358071 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.372967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.389618 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.425564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.460558 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.501588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.544744 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.582253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.627967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.685361 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping 
reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for 
pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.709091 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.757512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.802290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.829772 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.862431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.887716 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.887789 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.901202 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.939685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:16 crc kubenswrapper[4125]: I0312 13:29:16.987084 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.024632 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:17 crc kubenswrapper[4125]: E0312 13:29:17.025191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.025450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:17 crc kubenswrapper[4125]: E0312 13:29:17.027750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.028485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 
1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.060631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.106242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.156728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.190974 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.224582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.276306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.318783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.345930 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.384357 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.428558 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.470014 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:17 crc kubenswrapper[4125]: I0312 13:29:17.519496 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.031974 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.037199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.037437 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.037439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.037546 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.037647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.037874 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.037985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.038074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.038299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.038441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.038586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.038771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.038954 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.039032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.039223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.039294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.039420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.039567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.039727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.039982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.040059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.040185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.040357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.040424 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.040503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.040629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.040640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.040884 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.041087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.041580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.041676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.041784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.041991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.042101 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.042203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.042323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.042388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.042450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.042636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.042747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.045582 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046117 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.046471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046246 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.046020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.046661 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.046705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.047049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.047095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.047384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.047519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.046275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.048386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.048599 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.049195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.049408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.049604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.049717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.050522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.050729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.050928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.051009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.051281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
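
[editor's note] Every "Error syncing pod, skipping" entry above carries the same root cause: the runtime reports NetworkReady=false because no CNI configuration file exists under /etc/kubernetes/cni/net.d/. A minimal sketch of that readiness condition, assuming (from the message text alone, not from CRI-O source) that the check is simply "does any CNI config file exist in the conf dir":

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent is a hypothetical re-creation of the condition behind
// "No CNI configuration file in /etc/kubernetes/cni/net.d/": the network
// is treated as ready only once at least one config file
// (.conf, .conflist, or .json -- extensions assumed) exists in the dir.
func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ready, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// false reproduces the NetworkReady=false condition seen in the log
	fmt.Println("NetworkReady:", ready)
}

Once the network provider writes its config into that directory, the sync errors above should stop on the next pod worker retry.
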
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.051342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.051448 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.051563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.051639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.051702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.051756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.051928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.052151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.052250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.052310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.052465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.052161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.053504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.053712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.053955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054034 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.054461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.054621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.055091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.055173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.055226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.055286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:19 crc kubenswrapper[4125]: E0312 13:29:19.055781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.090392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.107802 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.132037 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.152476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.170975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.186089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.888541 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:19 crc kubenswrapper[4125]: I0312 13:29:19.888767 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:20 crc kubenswrapper[4125]: I0312 13:29:20.890192 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:20 crc kubenswrapper[4125]: I0312 13:29:20.890416 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025618 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.025999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.026021 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026001 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026051 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.026343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026520 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.026641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.026758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.027055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.027258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.027354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.027362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.027489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.027505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.027592 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.027676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.027696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.027745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.028098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.028513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.028775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.029223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.029464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.029574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.029603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.029688 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.029688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.030519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.031556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.031595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.031730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.031738 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.031788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.032039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.032072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.032117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.032187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.032306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.032380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.032388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.032735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.033004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.033371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.033473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.033769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.034025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.034285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.034509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.034742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.034805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.035045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.035274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.035995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.036204 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.036238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.036263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.036329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.036375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.036611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.036743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.036944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.037628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.038210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.038305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.038440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.038707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.039005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.039272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.039421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.039621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.040051 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.040259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.040410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:21 crc kubenswrapper[4125]: E0312 13:29:21.040459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.887354 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:21 crc kubenswrapper[4125]: I0312 13:29:21.887471 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.059177 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.099942 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.140671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.168198 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.195536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.223715 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.252677 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.280436 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.307637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.339186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.360400 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.378730 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.400575 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.424945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.448741 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.482203 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.513560 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.535398 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.565051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.581803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.600182 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.623522 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.640559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.660175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.678561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.696459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.716348 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.736241 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.754479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.775267 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.794477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.813671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.819228 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.819295 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.819319 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.819351 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.819384 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:22Z","lastTransitionTime":"2026-03-12T13:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.837798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: E0312 13:29:22.842709 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet 
has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"]
,\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"s
izeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"
names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.852373 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.852451 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.852471 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.852495 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.852519 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:22Z","lastTransitionTime":"2026-03-12T13:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.867201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: E0312 13:29:22.868507 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.875112 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.875210 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.875230 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.875256 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.875281 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:22Z","lastTransitionTime":"2026-03-12T13:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.887894 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.888015 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:22 crc kubenswrapper[4125]: E0312 13:29:22.890469 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.892264 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.898491 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.898683 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.898933 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.899504 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.899968 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:22Z","lastTransitionTime":"2026-03-12T13:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.916471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea04
3f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: E0312 13:29:22.917615 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ [... image list elided: identical to the node status patch in the 13:29:22.868507 entry above ...] ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.925640 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.925971 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.926117 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.926321 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.926487 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:22Z","lastTransitionTime":"2026-03-12T13:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.945619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: E0312 13:29:22.946248 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":
[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08
dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: E0312 13:29:22.946326 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.963792 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.978617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:22 crc kubenswrapper[4125]: I0312 13:29:22.996091 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.015236 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025872 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.026544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.026687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.026890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025981 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.026980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.025995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026080 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027910 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.027996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028064 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028927 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.028993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026258 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.029097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.035760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.036081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.036396 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.036474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.036595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.036728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026368 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.037323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.026418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.034645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.034712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.035263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.036950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.037415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.037525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.037874 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:23 crc kubenswrapper[4125]: E0312 13:29:23.038518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.196649 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.216491 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.232976 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.253517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.288489 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://269f47d2aa5bba9d3fcde3ebfa26b7be460fe18b5cca5d76284df6b1fa5b11f1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:55Z\\\",\\\"message\\\":\\\" 16321 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:28:55.001005 16321 handler.go:203] Sending *v1.EgressIP event handler 8 for removal\\\\nI0312 13:28:55.001009 16321 reflector.go:295] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001014 16321 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:28:55.001023 16321 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:28:55.001040 16321 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:28:55.001052 16321 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:28:55.001063 16321 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:28:55.001066 16321 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:28:55.001073 16321 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:28:55.001079 16321 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:28:55.001088 16321 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:28:55.001173 16321 reflector.go:295] Stopping 
reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:28:55.001268 16321 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:28:55.001309 16321 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:28:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for 
pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.307393 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.330537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.353557 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.372880 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.397412 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.424774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.450194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.467738 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.483201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.504725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.522547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.543635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.563458 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.577332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.598765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.622007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.640379 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.656190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.674025 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.693787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.723082 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.889662 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:23 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:23 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:23 crc kubenswrapper[4125]: I0312 13:29:23.889944 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:24 crc kubenswrapper[4125]: E0312 13:29:24.048457 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:29:24 crc kubenswrapper[4125]: I0312 13:29:24.893351 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:24 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:24 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:24 crc kubenswrapper[4125]: I0312 13:29:24.893716 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025726 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.025921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.025983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.026316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.026467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026737 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.026895 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.026966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.027285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.027666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027753 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027768 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.027754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.027963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.028057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.028092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.028304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.028369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.028432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.028532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.028600 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.028313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.028968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.029394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.029616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.029777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.030330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.030528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.030557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.030560 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.031090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.031170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.031242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.031370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.031389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.031482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.031536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.031549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.031661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.032039 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.032103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.032256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.032308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.032352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.032556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.032559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.032692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.032776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.032953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.033194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.033335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.033491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.034033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.034219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.034228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.034303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.034570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.035013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.035480 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.035679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.035785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.035995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.036540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036882 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.036990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.037081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.037343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.037387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.037477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.037562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.037706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.039963 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:29:25 crc kubenswrapper[4125]: E0312 13:29:25.040956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.886701 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:25 crc kubenswrapper[4125]: I0312 13:29:25.886850 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.038789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.039371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.039527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039533 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.039771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.040189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.040340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040341 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.040462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.039374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.040634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.040761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.040762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.041067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041193 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.041340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.041453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.041651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.041769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.041919 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042376 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042538 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.042800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.042957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.043005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.043065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.043216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.043268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.043267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.043393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.043458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.043510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.043572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.043619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.043792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.044100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.044160 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.044325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.044431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.044574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.044717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.045256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.045368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.045529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.045639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.045777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.045999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.046180 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.046235 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.046668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.046791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.046999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.047228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.047400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.047548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:27 crc kubenswrapper[4125]: E0312 13:29:27.047642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.890232 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:27 crc kubenswrapper[4125]: I0312 13:29:27.891014 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:28 crc kubenswrapper[4125]: I0312 13:29:28.889101 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:28 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:28 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:28 crc kubenswrapper[4125]: I0312 13:29:28.889328 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.025565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.025740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.025966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.025782 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.025939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.026499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.026637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.027060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.027233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.027367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.027516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.027695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.027703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.028085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.028476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.028702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.028804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.029037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.029106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.029199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.029338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.029508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.029646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029681 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.029803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.030109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.030279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.030355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.030370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.030454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.030757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.030912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.030962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.031030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.030967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.031084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.031384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.031787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.025560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.031990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.031946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.032255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.032373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.032597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.032782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.033028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.032933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.033490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.033769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.034329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.034537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.035268 4125 scope.go:117] "RemoveContainer" containerID="6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.035686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.035791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.035936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.035874 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.035902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.036022 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.036116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.036571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.036793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.037107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.037398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.037504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.038665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.039087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.039426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.039790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.040268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:29 crc kubenswrapper[4125]: E0312 13:29:29.053230 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.101762 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.135681 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.169237 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.193774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented 
the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.235665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.260330 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.280083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.303549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.346262 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.383076 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.417709 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.444320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.465592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.494162 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.513886 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.538641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.565005 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.584431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.607406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.635006 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.657956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.685406 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.710292 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.740007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.771040 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.799976 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.828728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.877670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.888727 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:29 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:29 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.889093 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.910284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.935696 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.960607 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:29 crc kubenswrapper[4125]: I0312 13:29:29.990974 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:29Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.023099 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.052475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.078026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.103086 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.127394 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.146592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.178281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.197967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.216409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.235966 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.254360 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.275365 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.294547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.315375 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.342983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.378255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.410008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.437001 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.460500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.484805 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.508072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.529803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.553234 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.577608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.605540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.630106 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.650028 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.670369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.691283 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.711714 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.740453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.761191 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s.
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.788427 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.809683 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.828376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.886108 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:30 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:30 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:30 crc kubenswrapper[4125]: I0312 13:29:30.886394 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.025710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.025763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.026040 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.025931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.026177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.026081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.026241 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.026530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.026764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.026978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.027188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.027491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027903 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.027926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.027946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028071 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.028283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.028570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.028786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028801 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.028979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.029197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.029320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.029467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.029631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.029767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.029888 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.030077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.030312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.030421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.030615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.030950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.030963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.031212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.031304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.031450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.031516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.031521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.031628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.031719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.031771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.031924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.032081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.032152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.032239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.032271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.032294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.032362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.032435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.032585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.033625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.033891 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.033948 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.033977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.033965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.034173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.034744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.035712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.036032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.036275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.036411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.036517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:31 crc kubenswrapper[4125]: E0312 13:29:31.036655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.419801 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.420293 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.420338 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.420391 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.420427 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.887222 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:31 crc kubenswrapper[4125]: I0312 13:29:31.887378 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.062644 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.101250 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.125498 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.146510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.174100 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.208443 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.235222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.265274 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.300614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.334409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.364710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.397542 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.427889 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.453721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.477584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.503619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.538576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.572461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.596648 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.613361 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.636082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod 
\"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.655495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.669442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.688742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.722512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.750781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.770634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.790885 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.812949 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.840433 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.862368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.887562 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:32 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.888452 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.889449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.913795 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.951331 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:32 crc kubenswrapper[4125]: I0312 13:29:32.977631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.000480 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.026000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.026513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.026759 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.027106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.027369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.027443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.027504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.026539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.028044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.028327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.026563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.026625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.026681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.027726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.027769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.028503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.027944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.028613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.028794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.029378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.027996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.029681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030021 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030437 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.030273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.031102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.031577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.031757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.032268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.032526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.032932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.033073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.033561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.034031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.034458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.034713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.035040 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.035062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.035623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.035953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.036199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.036369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.036644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.036945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.037200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.037469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.037571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.037716 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.038425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.039268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.039497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.039579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.039774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.040022 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.041610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.043578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.044734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.045260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.045706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.046182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.046551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.039796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.048153 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.048542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.048871 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.049100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.049247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.049562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.049742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.050357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.050367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.050634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.050710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.050920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.051095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.052373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.052652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.053080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.053245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.053335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.053389 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.053642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.054072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.057722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.058976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.060905 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.068431 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.068523 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.068546 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.068579 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.068611 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:33Z","lastTransitionTime":"2026-03-12T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.074979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from 
succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.085619 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.091362 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.091479 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.091505 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.091534 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.091564 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:33Z","lastTransitionTime":"2026-03-12T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.104568 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee139
83865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:3
2Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.111607 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.117887 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.118024 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.118108 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.118376 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.118494 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:33Z","lastTransitionTime":"2026-03-12T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.126431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.136871 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.143741 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.144299 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.144562 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.144996 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.145724 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:33Z","lastTransitionTime":"2026-03-12T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.157512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.164987 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [... node status patch body omitted: identical to the previous retry above (same conditions, allocatable/capacity, image list, and nodeInfo) ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.172187 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.172232 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.172245 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.172264 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.172282 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:33Z","lastTransitionTime":"2026-03-12T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.186753 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.189721 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [... node status patch body omitted: identical to the previous retry above (same conditions, allocatable/capacity, image list, and nodeInfo) ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: E0312 13:29:33.189789 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.206947 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.228540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.246589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.271935 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.296742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.317298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.341447 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.367329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.386981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.402787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.429392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.453090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.483859 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.504740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.525799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.545099 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.560943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.590909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.611312 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.626504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.647354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.672544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.700665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.728179 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.762908 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.888577 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:33 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:33 crc kubenswrapper[4125]: I0312 13:29:33.888767 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:34 crc kubenswrapper[4125]: E0312 13:29:34.054750 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:29:34 crc kubenswrapper[4125]: I0312 13:29:34.888329 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:34 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:34 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:34 crc kubenswrapper[4125]: I0312 13:29:34.888486 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.025168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.025295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.025491 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.025512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.025600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.025772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.025916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.026239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.026646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026799 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.027064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.027112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.027340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.027447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.027542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.027591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.027671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.027694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.027769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.028011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.028090 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.028283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.028314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.028453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.028474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.028622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.028708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.028969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.028971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.029218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.029244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.026491 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.029366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.029499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.029556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.029704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.029800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.030068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.030285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.030533 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.030439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.031096 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.031308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.031326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.030573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.030669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.030775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.031006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.031182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.031176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.031643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.031675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.032006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.032093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.032014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.032243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.032353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.032421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.032504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.032636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.032679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.033057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.033267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.033073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.033422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.033505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.033518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.033587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.033786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.033990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.034034 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.034177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.034205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.034297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.034387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.034531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.035047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.035083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.035201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.035224 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.035310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.035508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.035538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.035641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.036090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.036403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.036409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.036673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.037334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.039201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:35 crc kubenswrapper[4125]: E0312 13:29:35.039634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.889652 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:35 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:35 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:35 crc kubenswrapper[4125]: I0312 13:29:35.889768 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:36 crc kubenswrapper[4125]: I0312 13:29:36.028499 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:29:36 crc kubenswrapper[4125]: E0312 13:29:36.030177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:29:36 crc kubenswrapper[4125]: I0312 13:29:36.890558 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:36 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:36 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:36 crc kubenswrapper[4125]: I0312 
13:29:36.890763 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025339 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.025615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026080 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025184 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.026182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026104 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.026610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026651 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.025257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.026913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.026694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.027244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.027454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.027467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.028028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.028387 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.028477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.028513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.028397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.028658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.029029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.029515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.029724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.030083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.030386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.030473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.030674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.030753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.031067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.031250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.031391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.031633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.032221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.032934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.033356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.033414 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.034094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.034488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.034547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.035750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.036101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.036220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.036233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.036564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.037545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.037574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.037712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.037765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.037916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.038170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.038526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.038587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.038654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.038788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.039171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.039682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.039951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.040102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.040365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.040678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.041068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.041363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.041629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.041970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.216456 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/1.log" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.218316 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/0.log" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.218751 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f"} Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.219066 4125 scope.go:117] "RemoveContainer" containerID="ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.218775 4125 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f" exitCode=1 Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.221294 4125 scope.go:117] "RemoveContainer" containerID="88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f" Mar 12 13:29:37 crc kubenswrapper[4125]: E0312 13:29:37.222694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.261195 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.261195 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.289188 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.323457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.347721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.374561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.395309 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.417775 4125 
status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.438592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.457329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.486450 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.520346 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.553451 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.589108 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.619495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.651615 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.683294 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.718253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.744285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.774351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.808458 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.848442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.878409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.885208 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:37 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:37 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.885293 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.915008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.936409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.956351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.971853 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:37 crc kubenswrapper[4125]: I0312 13:29:37.988452 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.006294 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.022627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.039981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.056869 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.080293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.110541 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.134017 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.162970 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.191419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.212957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.229201 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/1.log" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.245965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.266360 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.285260 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.307710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.352925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.380197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.401548 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.421742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.444614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.471433 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.494670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.517679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.546454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.573052 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.600946 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc 
kubenswrapper[4125]: I0312 13:29:38.628705 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.657574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.684323 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.710050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.730270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.754886 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.777793 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.797626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.822875 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.844894 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.865439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.886937 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:38 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:38 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.887082 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.893144 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.921495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.945767 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:38 crc kubenswrapper[4125]: I0312 13:29:38.964905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.025055 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.025237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.025697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.025762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.025914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.026296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.026439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.026595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026643 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.026734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.026904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.027206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.027313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.027459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.027670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.027799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.028099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.028257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.028379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.028498 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.029192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.029238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.029436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.029494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.029595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.029774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.030081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.030167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.030365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.030531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.030683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.030957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.030979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.031056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.031023 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.031267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.031434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.031572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.031715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.032001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.032218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.032425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.032660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.032951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.033394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.033209 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.033635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.033724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.033956 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.034009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.034049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.034234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.034254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.034283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.034506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.034625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.034712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.034762 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.034915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.035081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.035316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.035591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.035711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.035746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.035946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.036089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.036527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.036995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.036997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.037315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.037526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.037988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.038147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:39 crc kubenswrapper[4125]: E0312 13:29:39.055905 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.888242 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:29:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:29:39 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:29:39 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:29:39 crc kubenswrapper[4125]: I0312 13:29:39.888428 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:29:40 crc kubenswrapper[4125]: I0312 13:29:40.888219 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:29:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:29:40 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:29:40 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:29:40 crc kubenswrapper[4125]: I0312 13:29:40.888453 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.026417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.026697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.027083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.027090 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.027250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.027383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.027518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.027679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.027774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.027778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028071 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.028325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.028539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.028748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.029032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.028031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.029328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.029403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.029573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.029647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.029776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.030041 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.030264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.030448 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.030560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.031029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.031376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.031681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.032088 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.032320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.032347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.032387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.032391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.032519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.032619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.032433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.033046 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.033355 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.033642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.034170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.034207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.034295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.034988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.035094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.035005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.035292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.035313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.035377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.035669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.035794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.036199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.036429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.036661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.036955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.037335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.037386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.037581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.037580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.037647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.037768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.037999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.038214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.038219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.038403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.038610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.038751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.039009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.039217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.039381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.039763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.039419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.039524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.039653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.040548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.040647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.040708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.041069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:41 crc kubenswrapper[4125]: E0312 13:29:41.041435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.887517 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:41 crc kubenswrapper[4125]: I0312 13:29:41.887668 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.055077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml 
--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03
-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.079730 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.123326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.156652 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.203758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod 
event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.246143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.284473 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.312965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.329515 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.346408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.370211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.391160 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.411184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.430562 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.445989 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.464003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.480939 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.497246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.519457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.536413 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.557217 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.585777 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.610468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.631212 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.649156 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.669944 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.688011 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.710309 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.734360 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.750071 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.770180 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.788945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.804401 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.820157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.836070 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.853310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.870514 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.885666 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.885789 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.889935 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.910969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.930968 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.950972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.967409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:42 crc kubenswrapper[4125]: I0312 13:29:42.982225 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.003703 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025332 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.026604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025464 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025511 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025599 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025770 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025996 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.025988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026027 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026036 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026069 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.026093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.026754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.027276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.027523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.027745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.028024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.028155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.028211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.028319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.028394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.028470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.028666 4125 scope.go:117] "RemoveContainer" containerID="6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.029878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030864 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.030941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.031956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.032181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.032387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.032469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.032629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.032739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.032949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.033786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.034042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.034206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.050240 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.074981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.107561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.137316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.154977 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.171611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.191410 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.207221 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.225899 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.245369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.259481 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/2.log" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.263921 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.267158 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1"} Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.268064 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.292294 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.309569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.332255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.350353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.368374 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.547391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.569434 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.578780 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.578928 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.578946 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.578965 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.578985 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:43Z","lastTransitionTime":"2026-03-12T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.598878 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserv
er-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.604721 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.609586 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.609635 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.609652 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.609672 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.609690 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:43Z","lastTransitionTime":"2026-03-12T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.628414 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.631589 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.636668 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.636733 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.636750 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.636790 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.636858 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:43Z","lastTransitionTime":"2026-03-12T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.650761 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.651979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.655686 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.655787 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.655849 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.655875 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.655900 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:43Z","lastTransitionTime":"2026-03-12T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.667446 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.670634 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.674234 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.674293 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.674309 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.674329 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.674347 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:43Z","lastTransitionTime":"2026-03-12T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.679598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 
12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.687320 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706
548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: E0312 13:29:43.687372 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.696458 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.716148 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.731685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.751265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.771617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.788782 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.811383 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.826468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.846927 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.868922 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.886564 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.887000 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.889784 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.919028 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.941564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.961927 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:43 crc kubenswrapper[4125]: I0312 13:29:43.983797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.007240 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.024316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.048359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: E0312 13:29:44.056915 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.070906 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.093042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.119374 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.150261 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.168949 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.193679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.215516 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.228231 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.244700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.264614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.284239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.298903 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.315740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.344991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.369710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.390939 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.436329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.461601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.484310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.506536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.523048 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.538406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.555505 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.568908 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.582804 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.597297 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.617944 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.638328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.651667 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.689080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.727366 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.759562 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.808984 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.847163 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.883632 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.886269 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:29:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:29:44 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:29:44 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.886476 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.923600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:44 crc kubenswrapper[4125]: I0312 13:29:44.964005 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.018910 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"
containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 
handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.026085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.026399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.026799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.026940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.027065 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.027263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.027430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.027572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.027629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.027757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.027760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.028375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.028632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.028183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.026437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.030683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.031214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.031514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.031506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.031905 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.032471 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.032588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.032663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.032705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.032978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033037 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.032991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.033635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.033976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.034329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.034498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.034679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.035367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.035638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.036256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.036455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.036694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.036969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.037227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.037389 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.037529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.037708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.037981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.038964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.039061 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.039214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.039353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.039459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.039721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.039929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040207 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.040781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.041720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.042170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.042775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.036047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.043323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.052586 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.092502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.123381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.158546 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.199978 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.242670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.282275 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/3.log" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.284366 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/2.log" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.294516 4125 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1" exitCode=1 Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.294643 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1"} Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.294712 4125 scope.go:117] "RemoveContainer" containerID="6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.308458 4125 scope.go:117] "RemoveContainer" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.309677 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: E0312 13:29:45.310005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.343532 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.371006 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.402093 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.440444 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.480959 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.522878 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.564778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.608715 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.649496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.687389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.726077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.776086 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.817415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.858093 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.879791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.891718 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:29:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:29:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:29:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.891877 4125 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.891992 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.893426 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec"} pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" containerMessage="Container router failed startup probe, will be restarted" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.893537 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" containerID="cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec" gracePeriod=3600 Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.932025 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:45 crc kubenswrapper[4125]: I0312 13:29:45.958599 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.016944 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.047588 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.075563 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.122749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.162164 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.199776 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.243035 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.293775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.303941 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/3.log" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.334580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.369283 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.404013 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.440406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.482253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.505778 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.506106 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.506277 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.506239035 +0000 UTC m=+678.829625094 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.524794 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.561320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.603158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.642456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.680728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713475 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713613 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.713658 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713680 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713714 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.713733 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.713713006 +0000 UTC m=+679.037099065 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713771 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.713815 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713920 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713962 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.713985 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.713997 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714027 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.714013014 +0000 UTC m=+679.037398873 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714057 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.714062 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.714210 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.714320 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.714410 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.714478 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714595 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714638 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714711 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714795 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714943 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715019 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715108 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.714641 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.714627833 +0000 UTC m=+679.038013782 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715224 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715209779 +0000 UTC m=+679.038595618 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715244 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.71523394 +0000 UTC m=+679.038619779 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715275 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715252881 +0000 UTC m=+679.038638720 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715294 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715285152 +0000 UTC m=+679.038671011 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715314 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715304002 +0000 UTC m=+679.038689821 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715331 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715323233 +0000 UTC m=+679.038709052 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715351 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715341843 +0000 UTC m=+679.038727812 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715408 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715421 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715445 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715434506 +0000 UTC m=+679.038820375 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715469 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715457278 +0000 UTC m=+679.038843247 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715567 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715616 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715657 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715720 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715734 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715720915 +0000 UTC m=+679.039106774 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715775 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715785 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715885 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.715802918 +0000 UTC m=+679.039188887 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715928 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.715953 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716014 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.716001344 +0000 UTC m=+679.039387203 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716016 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716053 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716090 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.716062465 +0000 UTC m=+679.039448344 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716160 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.715967 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716169 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.716153168 +0000 UTC m=+679.039539027 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716249 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716304 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716312 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.716359 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.716346915 +0000 UTC m=+679.039732874 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716368 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716428 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716463 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716505 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716543 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716582 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716638 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716676 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716714 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716765 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.716886 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717012 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717061 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717088 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717165 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717195 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717246 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.71722766 +0000 UTC m=+679.040613719 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717279 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.717266251 +0000 UTC m=+679.040652330 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717284 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717092 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717302 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717314 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717363 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717387 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717405 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717415 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717452 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.717439926 +0000 UTC m=+679.040825765 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717474 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.717463027 +0000 UTC m=+679.040848876 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717504 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717596 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717635 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717649 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717705 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.717691935 +0000 UTC m=+679.041077904 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717757 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717768 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717793 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717810 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.717796167 +0000 UTC m=+679.041182016 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717909 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717923 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717950 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.717983 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.717996 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.717982842 +0000 UTC m=+679.041368721 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718031 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718044 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718081 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.718069296 +0000 UTC m=+679.041455245 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718156 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718178 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718238 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.718224349 +0000 UTC m=+679.041610228 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718265 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718288 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718301 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.718290582 +0000 UTC m=+679.041676541 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718344 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718353 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718390 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.718380415 +0000 UTC m=+679.041766244 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718391 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718429 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718460 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718478 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718505 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.718494868 +0000 UTC m=+679.041880707 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718537 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718538 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718573 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.71856276 +0000 UTC m=+679.041948719 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718601 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718618 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718635 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718659 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.718650103 +0000 UTC m=+679.042035912 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718686 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718754 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.718875 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.719163 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.719210 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.719374 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.719766 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.719958 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.719992 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720041 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.718429 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720075 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720375 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720420 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720479 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720531 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720574 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720619 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720676 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.724267 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.724300 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720714 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720764 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.724413 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.724431 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720800 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720922 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.720967 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721008 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721040 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721060 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.721047162 +0000 UTC m=+679.044433214 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725165 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725275 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725328 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725487 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725602 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725726 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.725784 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.725904 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726074 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.725942107 +0000 UTC m=+679.049328019 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721164 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726308 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.72629024 +0000 UTC m=+679.049676249 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726356 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726421 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726499 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726603 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726694 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726975 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.726362 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.726349752 +0000 UTC m=+679.049735591 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727181 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727094821 +0000 UTC m=+679.050481010 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727466 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727437882 +0000 UTC m=+679.050824111 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727563 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727535755 +0000 UTC m=+679.050922004 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727615 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727595286 +0000 UTC m=+679.050981695 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727657 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727636738 +0000 UTC m=+679.051023057 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
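
Every nestedpendingoperations.go:348 entry above ends the same way: the failed MountVolume.SetUp is requeued roughly two minutes out, "(durationBeforeRetry 2m2s)". The mounts fail because the referenced ConfigMap/Secret objects are "not registered" in the kubelet's object cache yet, and each retry is scheduled with an exponential backoff that has reached its ceiling. Below is a minimal Go sketch of that backoff shape, not kubelet's actual implementation: the 2m2s cap is taken from the log lines themselves, while the 500ms initial delay and the doubling factor are assumptions for illustration.

    package main

    import (
    	"fmt"
    	"time"
    )

    // Cap taken from the "(durationBeforeRetry 2m2s)" entries in this log.
    const maxDurationBeforeRetry = 2*time.Minute + 2*time.Second

    // nextBackoff doubles the previous delay and clamps it at the cap.
    // The initial delay and factor are assumptions, not kubelet's constants.
    func nextBackoff(prev time.Duration) time.Duration {
    	if prev == 0 {
    		return 500 * time.Millisecond
    	}
    	next := prev * 2
    	if next > maxDurationBeforeRetry {
    		next = maxDurationBeforeRetry
    	}
    	return next
    }

    func main() {
    	var d time.Duration
    	for i := 1; i <= 10; i++ {
    		d = nextBackoff(d)
    		fmt.Printf("retry %2d: durationBeforeRetry %v\n", i, d)
    	}
    }

After about nine doublings the delay saturates at the cap, which is why every operation in this burst reports the same retry window: failures at 13:29:46, no retries permitted until 13:31:48.
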
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727698 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727677809 +0000 UTC m=+679.051064208 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727731 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.72771808 +0000 UTC m=+679.051104329 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727760 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727746041 +0000 UTC m=+679.051132220 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727788 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727775672 +0000 UTC m=+679.051161851 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721201 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727925 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. 
No retries permitted until 2026-03-12 13:31:48.727802382 +0000 UTC m=+679.051188561 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727968 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727952557 +0000 UTC m=+679.051338736 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721252 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.727999 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.727982451 +0000 UTC m=+679.051368400 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721337 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728033 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728016982 +0000 UTC m=+679.051402821 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728181 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728098101 +0000 UTC m=+679.051484270 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728234 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728213004 +0000 UTC m=+679.051599423 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728276 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728256286 +0000 UTC m=+679.051642695 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728324 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728301317 +0000 UTC m=+679.051687746 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728355 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728342138 +0000 UTC m=+679.051728377 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.721294 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728386 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728370759 +0000 UTC m=+679.051756998 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728424 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.72841094 +0000 UTC m=+679.051797189 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728451 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728438771 +0000 UTC m=+679.051824950 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728484 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728468802 +0000 UTC m=+679.051855081 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728543 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728498353 +0000 UTC m=+679.051884412 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728577 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728563895 +0000 UTC m=+679.051949894 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728608 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728590925 +0000 UTC m=+679.051976904 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728630 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728618866 +0000 UTC m=+679.052004755 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728657 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728642296 +0000 UTC m=+679.052028416 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728677 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728666308 +0000 UTC m=+679.052052197 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728697 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.728685968 +0000 UTC m=+679.052071957 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.728719 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.72870961 +0000 UTC m=+679.052095609 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.733510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.763344 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.805257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebf9cd338069114f85afb58419767381e71aedf0a2f616136468a8b480328630\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:28:45Z\\\",\\\"message\\\":\\\"2026-03-12T13:27:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753\\\\n2026-03-12T13:27:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4b7867d-f4e7-4efb-aa52-9b9721892753 to /host/opt/cni/bin/\\\\n2026-03-12T13:28:00Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:00Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:28:45Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.828279 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.828563 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.828720 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829107 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829215 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
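
The status_manager.go:877 failures above are a separate problem from the mount errors: kubelet's status patches are rejected by the pod.network-node-identity.openshift.io admission webhook because its serving certificate expired on 2024-12-26 while the node clock reads 2026-03-12, so every TLS handshake to https://127.0.0.1:9743 fails. The validity-window check that produces the "certificate has expired or is not yet valid" wording can be reproduced with the Go standard library; a minimal sketch follows (the certificate path is a placeholder, not taken from this log):

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"log"
    	"os"
    	"time"
    )

    func main() {
    	// Placeholder path: point it at the webhook's serving certificate.
    	data, err := os.ReadFile("serving-cert.pem")
    	if err != nil {
    		log.Fatal(err)
    	}
    	block, _ := pem.Decode(data)
    	if block == nil || block.Type != "CERTIFICATE" {
    		log.Fatal("no PEM CERTIFICATE block found")
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		log.Fatal(err)
    	}
    	now := time.Now()
    	switch {
    	case now.Before(cert.NotBefore):
    		fmt.Printf("certificate not yet valid: current time %s is before %s\n",
    			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
    	case now.After(cert.NotAfter):
    		// Same decision the failing handshakes in this log report.
    		fmt.Printf("certificate has expired: current time %s is after %s\n",
    			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
    	default:
    		fmt.Printf("certificate valid until %s\n", cert.NotAfter.Format(time.RFC3339))
    	}
    }
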
\"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829295 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829330 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829364 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829402 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829448 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829484 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829539 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829574 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") 
" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829619 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829654 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829688 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829723 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829757 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829792 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.829895 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830005 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830067 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830234 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830369 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830489 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830579 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830618 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830655 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830688 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830749 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830783 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830885 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.830947 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.831021 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.831056 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.831102 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.831269 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: 
\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.831485 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.831883 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.831978 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.831957705 +0000 UTC m=+679.155343674 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832289 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832339 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832358 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832403 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.832389507 +0000 UTC m=+679.155775476 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832553 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832589 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.832579223 +0000 UTC m=+679.155965213 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832750 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.832903 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.83281363 +0000 UTC m=+679.156199709 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833082 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833162 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833210 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.833194511 +0000 UTC m=+679.156580470 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833429 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833481 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833497 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833536 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.833524601 +0000 UTC m=+679.156910570 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833691 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.833759 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.833746118 +0000 UTC m=+679.157132077 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834011 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834086 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.834073317 +0000 UTC m=+679.157459276 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834282 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834357 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.834343955 +0000 UTC m=+679.157729914 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834522 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834572 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834588 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834628 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.834616433 +0000 UTC m=+679.158002382 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834770 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.834807 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.834797179 +0000 UTC m=+679.158183138 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835032 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835107 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.835091058 +0000 UTC m=+679.158477148 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835327 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835369 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835413 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.835400607 +0000 UTC m=+679.158786676 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835576 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835621 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835637 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835674 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:31:48.835663245 +0000 UTC m=+679.159049304 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835895 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835956 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836020 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836069 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836154 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836170 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836179 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.835966 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.835952813 +0000 UTC m=+679.159338772 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836234 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.83622315 +0000 UTC m=+679.159608899 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836250 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836242421 +0000 UTC m=+679.159628180 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836258 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836267 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836259231 +0000 UTC m=+679.159645090 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836264 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836318 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836331 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836346 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836284 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836278592 +0000 UTC m=+679.159664341 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836386 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836386 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836429 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836434 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836395658 +0000 UTC m=+679.159782007 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836483 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836490 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836496 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836487 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.83646408 +0000 UTC m=+679.159850439 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836507 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836527 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836542 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836526392 +0000 UTC m=+679.159912561 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836492 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836574 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836580 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836559463 +0000 UTC m=+679.159945712 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836588 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836605 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836600 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836632 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836443 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836679 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836694 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836627 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836605034 +0000 UTC m=+679.159991893 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836702 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836583 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836722 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836710836 +0000 UTC m=+679.160096765 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836745 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836735487 +0000 UTC m=+679.160121406 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836510 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836768 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836759608 +0000 UTC m=+679.160145537 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836791 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836782508 +0000 UTC m=+679.160168437 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836800 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836862 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836878 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836878 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836802039 +0000 UTC m=+679.160187958 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836448 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836956 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836986 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837018 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836766 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837196 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837222 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836373 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837298 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.836910 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.836899362 +0000 UTC m=+679.160285149 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837381 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837352043 +0000 UTC m=+679.160737922 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837409 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837401074 +0000 UTC m=+679.160786833 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837427 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837419715 +0000 UTC m=+679.160805474 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837446 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837438555 +0000 UTC m=+679.160824304 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837460 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837454086 +0000 UTC m=+679.160839835 (durationBeforeRetry 2m2s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837475 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837468596 +0000 UTC m=+679.160854355 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837492 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837485087 +0000 UTC m=+679.160870946 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837508 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837500977 +0000 UTC m=+679.160886736 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.837525 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.837517968 +0000 UTC m=+679.160903727 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.848077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.881353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.922665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.934713 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935038 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.935209 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935388 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935420 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935441 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935583 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.935212751 +0000 UTC m=+679.258598820 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935861 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:31:48.935835788 +0000 UTC m=+679.259221567 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.935874 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935945 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935961 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.935971 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936006 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.935997923 +0000 UTC m=+679.259383692 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936157 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936221 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.93620815 +0000 UTC m=+679.259594189 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.936037 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.936493 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.936543 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.936579 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.936633 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936782 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936726 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936907 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936934 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936936 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936947 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936958 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936895 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.936879598 +0000 UTC m=+679.260265417 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937159 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937241 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937255 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.93723379 +0000 UTC m=+679.260619749 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937288 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.937277152 +0000 UTC m=+679.260663071 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937313 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937335 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937347 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937389 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.937375203 +0000 UTC m=+679.260761182 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937447 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937445 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937486 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937503 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937529 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937537 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937591 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937616 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.937588761 +0000 UTC m=+679.260975220 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937662 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937676 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937689 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937704 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937730 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937745 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937750 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937768 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937778 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937790 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.937777376 +0000 UTC m=+679.261163335 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937894 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.937803915 +0000 UTC m=+679.261189864 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.937942 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938003 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938009 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938031 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938043 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938052 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938081 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938090 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938078716 +0000 UTC m=+679.261464685 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938183 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938199 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938208 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938216 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938239 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938229808 +0000 UTC m=+679.261615577 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938280 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938278 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938317 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938306943 +0000 UTC m=+679.261692912 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938344 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938347 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938370 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938363372 +0000 UTC m=+679.261749221 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938393 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938474 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938510 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938499888 +0000 UTC m=+679.261885937 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938508 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938558 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938576 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938586 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938596 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938619 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.93860868 +0000 UTC m=+679.261994529 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937944 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938644 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.93863265 +0000 UTC m=+679.262018489 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938672 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938659951 +0000 UTC m=+679.262045790 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938708 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938720 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938729 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938742 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938757 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938747094 +0000 UTC m=+679.262133033 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938432 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938778 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938768596 +0000 UTC m=+679.262154605 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.937693 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938803 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.938793425 +0000 UTC m=+679.262179444 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.938560 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938974 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.938996 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.939002 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939008 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.939046 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.939154 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939214 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939237 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939249 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939290 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.939279301 +0000 UTC m=+679.262665350 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939329 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939376 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.939364182 +0000 UTC m=+679.262750021 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.939410 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939419 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939432 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939501 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939538 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.939527789 +0000 UTC m=+679.262913708 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939561 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.93954994 +0000 UTC m=+679.262935949 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.939583 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.939574128 +0000 UTC m=+679.262960140 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.939791 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.940066 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.940089 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.940138 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.940190 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.940177888 +0000 UTC m=+679.263563847 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.940217 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.940208509 +0000 UTC m=+679.263594428 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
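The retry horizon in the entries above is not arbitrary: each failed MountVolume.SetUp is requeued under kubelet's capped exponential backoff, and once the early short delays are exhausted every subsequent failure waits the cap, which is why each operation reports exactly 2m2s (durationBeforeRetry) and a "No retries permitted until" time roughly two minutes out. A minimal Go sketch of that policy, assuming the 500ms initial delay and 2m2s cap used by pkg/util/goroutinemap/exponentialbackoff in kubernetes/kubernetes; the loop below is illustrative, not kubelet's actual code:

// backoff.go: illustrative reproduction of the capped exponential backoff
// behind the "durationBeforeRetry 2m2s" entries above.
package main

import (
	"fmt"
	"time"
)

const (
	initialDurationBeforeRetry = 500 * time.Millisecond // assumed initial delay
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second // the 2m2s cap seen in the log
)

func main() {
	delay := initialDurationBeforeRetry
	for attempt := 1; ; attempt++ {
		fmt.Printf("failure %d: no retries permitted for %v\n", attempt, delay)
		if delay >= maxDurationBeforeRetry {
			// From here on, every failed MountVolume.SetUp waits the cap,
			// which is why the log shows a uniform 2m2s.
			break
		}
		delay *= 2
		if delay > maxDurationBeforeRetry {
			delay = maxDurationBeforeRetry
		}
	}
}

Running the sketch prints delays of 500ms, 1s, 2s, ... up to the 2m2s ceiling, matching the cadence of the nestedpendingoperations.go entries.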
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.936751 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: E0312 13:29:46.940418 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:48.940319362 +0000 UTC m=+679.263705321 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 12 13:29:46 crc kubenswrapper[4125]: I0312 13:29:46.957993 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.003876 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:46Z is after 2024-12-26T00:46:02Z"
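Both status-patch failures above share one root cause: the network-node-identity webhook at https://127.0.0.1:9743 presents a serving certificate whose NotAfter (2024-12-26T00:46:02Z) is long past the node's clock (2026-03-12), so every TLS handshake is rejected before a patch can be delivered. A minimal Go sketch of the validity test that produces the "certificate has expired or is not yet valid" error, using only crypto/x509; the PEM file name below is a hypothetical stand-in for the webhook's serving certificate:

// certcheck.go: the NotBefore/NotAfter validation that fails in the
// webhook calls above, reduced to standard-library primitives.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("webhook-serving-cert.pem") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now().UTC()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		// Mirrors the kubelet error text: current time ... is after NotAfter.
		fmt.Printf("x509: certificate has expired or is not yet valid: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}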
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.025353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.025479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.025663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.025398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.025917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.025804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.026014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.026065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.026099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.026484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.026688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.026736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.026797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.026921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027066 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.027272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.027400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.027670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.027915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.027810 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.027175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.028999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.029918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.030064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.030218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.030314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.030633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.031778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.032039 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.032076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.032099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.032276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.032573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.032779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.033075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.033358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.033539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.033767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.034251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.034485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.034941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.035326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.035501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.035528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.035755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.035757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.036010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.036176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.036283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.036397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.036522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.037279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.037689 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.037772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.038231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.038322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.038332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.038414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.038682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.042942 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043192 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043355 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043414 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043437 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043511 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:31:49.043487878 +0000 UTC m=+679.366874027 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043300 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043581 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.043633 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:31:49.043617918 +0000 UTC m=+679.367003787 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.043254 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.044169 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.044442 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.044508 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.044530 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:29:47 crc kubenswrapper[4125]: E0312 13:29:47.044591 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:31:49.044573255 +0000 UTC m=+679.367959314 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.047502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.078629 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.119295 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.181275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.199099 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.235912 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.277242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.318614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.356973 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.396351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.452852 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.481697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.521620 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.560610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.605919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.642143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.679401 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.721577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.761663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.806360 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.836606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.880031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.934982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:47 crc kubenswrapper[4125]: I0312 13:29:47.956610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:47Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:48 crc kubenswrapper[4125]: I0312 13:29:48.003803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying 
failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:48 crc kubenswrapper[4125]: I0312 13:29:48.042657 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:48 crc kubenswrapper[4125]: I0312 13:29:48.087646 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025617 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.027204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.027307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.027347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.027391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.025997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026027 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026203 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026521 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.026774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:49 crc kubenswrapper[4125]: I0312 13:29:49.027100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.029744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.030574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.030983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.031338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.031543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.031936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.032394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.032555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.032940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.033176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.033328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.033680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.034050 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.034764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.034787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.035334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.035460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.035576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.035790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.036085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.036310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.036441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.036638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.036945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.037192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.037360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.037491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.037685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.037941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.038179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.038469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.038524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.038623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.038804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.041598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.041989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.042737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.043059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.043385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.043930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.044408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.044792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.045281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.045518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.045715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.046036 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.046367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.046648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:49 crc kubenswrapper[4125]: E0312 13:29:49.064063 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.036363 4125 scope.go:117] "RemoveContainer" containerID="88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.079774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.119204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.143281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.176688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.214298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.238630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.256675 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.283481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.302265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.325040 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.334364 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/1.log" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.334442 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0"} Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.349600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.367168 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.385785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.404284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.429934 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.462613 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.484481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.509449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.532618 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.559733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.584065 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.606664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.633507 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.658272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.685219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.705210 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.734572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.767353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.789368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.808129 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.830011 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.847234 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.869917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.894512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.914472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.934733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.956381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.974167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:50 crc kubenswrapper[4125]: I0312 13:29:50.997301 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.016362 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.026442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.026545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.026460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.027912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.027982 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028034 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.027986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.028188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.028357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.028478 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028626 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.028711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.028903 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.029026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.029030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.029257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.029497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.029667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.029737 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.029970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.030190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.030360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.030482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.030707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.030740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.030975 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.030979 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031332 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.031348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.031518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.031910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.031930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.032085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.032200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.032270 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.032462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.032741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.033019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.033329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.033542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.033901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.034081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.034388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.034600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.034952 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.035244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.035499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.035646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.035911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.036275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.036461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.036899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.037294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.037575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.037909 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.038266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.038454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.038698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.039983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.040930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:51 crc kubenswrapper[4125]: E0312 13:29:51.041028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.043353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.061531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.081886 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.105174 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.122366 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.138241 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.155496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.173662 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.197398 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.215286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.238414 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.258581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.279796 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.302624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.327380 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.370290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod
event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] 
Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.390933 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.409448 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.437921 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.458168 4125 status_manager.go:877] "Failed to update status for pod"
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.477919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.495273 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.519574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.537890 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.598062 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.624399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.644083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.662042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.677084 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.690681 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.707270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.721999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.737608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.751967 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.771972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.788260 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.805786 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.825751 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.843528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.863316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.878414 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.894487 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.912639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.935588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.966315 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying
failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:51 crc kubenswrapper[4125]: I0312 13:29:51.991615 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.007802 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.026787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.052058 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.071422 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.093687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.132051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.154373 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.221787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.247448 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.278037 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.310687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.349580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.377455 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.411576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.436528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.461771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.495274 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.518740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.551918 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.571619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.591504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.613574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.633771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.655743 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.677539 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.706706 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.737679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.757718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.778528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.805517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.825189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.849941 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.872325 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.895708 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.923616 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:52 crc kubenswrapper[4125]: I0312 13:29:52.963239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.001599 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.025167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.025263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.025396 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.025443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.025455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.025632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.025806 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.026012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.026220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.026184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.026183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.026229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027088 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027377 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.027992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028063 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028098 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.028751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.029211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.029280 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.029311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.029322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.029471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.029611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.029722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.029785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.029961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.030940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.031929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.032022 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.032061 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.032139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.032369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.032450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.032910 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033658 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.033759 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.034037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.054477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.080457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.116270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.158980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.196693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.234024 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.277640 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.321325 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.361772 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.397457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.437012 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.473465 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.522445 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.558928 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.612235 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.639365 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.680614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.721176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.765421 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.806509 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.846554 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.846976 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.847058 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.847157 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.847263 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:53Z","lastTransitionTime":"2026-03-12T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.852590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.880300 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.883079 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.888568 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.888663 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.888693 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.888730 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.888767 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:53Z","lastTransitionTime":"2026-03-12T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.927563 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.930290 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.936206 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.936346 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.936687 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.936917 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.937030 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:53Z","lastTransitionTime":"2026-03-12T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.963241 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:53 crc kubenswrapper[4125]: E0312 13:29:53.963705 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.969734 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.969912 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.970078 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.970211 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:29:53 crc kubenswrapper[4125]: I0312 13:29:53.970299 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:53Z","lastTransitionTime":"2026-03-12T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.000917 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.001060 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.001189 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.001298 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.001394 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:29:54Z","lastTransitionTime":"2026-03-12T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.004204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:53Z is after 2024-12-26T00:46:02Z"
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":
[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08
Mar 12 13:29:54 crc kubenswrapper[4125]: E0312 13:29:54.023391 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.038500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: E0312 13:29:54.067494 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.084957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.123389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.169610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.207748 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.245306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.282671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.321491 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.363993 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.404540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.449084 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.486583 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.526531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.568476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.607329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.647459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.687290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.735010 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.761988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.807999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.842076 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.886186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.927449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:54 crc kubenswrapper[4125]: I0312 13:29:54.964076 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.002501 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.025646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.025781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.025917 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026041 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.027526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.027777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.026562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.028049 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.028149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.028248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026701 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.028412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.026999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027321 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.027875 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.028087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.028562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.028730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.028873 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.028941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.029197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.029477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.029686 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.029772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.029874 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.030059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030478 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.030798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.031096 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.031318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.031483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.031646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.031775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.032085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.032292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.032481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.032639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.032706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.032964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.033085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.033433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.033563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.033624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.033765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.033955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.034505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.034578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.034673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.034772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.034930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.034996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.035052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.035395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.035287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:29:55 crc kubenswrapper[4125]: E0312 13:29:55.036203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.041236 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.081388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.134186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.167077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.204403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.246243 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.291930 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.322070 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.368610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.403430 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.452437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f8b43c9b2762dda48e024751afbb4593826424a2303d9ead17cf66d13967827\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:13Z\\\",\\\"message\\\":\\\".go:203] Sending *v1.Pod event handler 3 for removal\\\\nI0312 13:29:13.750970 16760 handler.go:203] Sending *v1.Pod event handler 6 for removal\\\\nI0312 13:29:13.750979 16760 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:29:13.750985 16760 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:29:13.750991 16760 handler.go:203] Sending *v1.Node event handler 2 for removal\\\\nI0312 13:29:13.750996 16760 handler.go:203] Sending *v1.Node event handler 7 for removal\\\\nI0312 13:29:13.751000 16760 handler.go:203] Sending *v1.Node event handler 10 for removal\\\\nI0312 13:29:13.751006 16760 handler.go:217] Removed *v1.Node event handler 10\\\\nI0312 13:29:13.751011 16760 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:29:13.751017 16760 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:29:13.751097 16760 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:13.751315 16760 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:29:13.751386 16760 handler.go:217] Removed *v1.Pod 
event handler 3\\\\nI0312 13:29:13.751392 16760 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:29:13.751397 16760 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:29:13.751407 16760 handler.go:217] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] 
Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.483931 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.530219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.562065 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.608531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.656293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.711509 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.735416 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.775773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.809941 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.844613 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.882568 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.926022 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:55 crc kubenswrapper[4125]: I0312 13:29:55.956418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:56 crc kubenswrapper[4125]: I0312 13:29:56.012281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:56 crc kubenswrapper[4125]: I0312 13:29:56.040077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:56Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:56 crc kubenswrapper[4125]: I0312 13:29:56.088495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:56 crc kubenswrapper[4125]: I0312 13:29:56.126586 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.025983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026043 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026013 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.026800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027050 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.026372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.027086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.027373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.027595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.027708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.028032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.028039 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.028342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.028379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.028789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.028926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.029173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.029204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.029387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.029482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.029628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.029639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.030031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.030069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.030225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.030234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.030349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.030500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.030573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.030658 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.031410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.031515 4125 scope.go:117] "RemoveContainer" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.031577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.031760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.031964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.032516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.032806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.033177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.033359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.033434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.033469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.033693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.033795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.034044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.034062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.033949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.034365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.034449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.034468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.034545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.034563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.034625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.035008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.035456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.035596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.035234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.035772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.036045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.036265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.036407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.036520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.036649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.036929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.037267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.037342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.037458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.037673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.037930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:29:57 crc kubenswrapper[4125]: E0312 13:29:57.038086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.068756 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.096718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.133964 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.161069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.191496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.256495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.287700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.315594 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.347217 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.389577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.415015 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.449562 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.480020 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.533617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.576895 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.605921 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.636079 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.664051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.693999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.757329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.806000 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.830335 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.851178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.868571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.890178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.908918 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.925764 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.949506 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.964917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:57 crc kubenswrapper[4125]: I0312 13:29:57.989240 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:57Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.031059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.060235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.088972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.120753 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.148152 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.171662 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.194432 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.218442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.240719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.260692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.290013 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.312166 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.334733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.360260 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.384451 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.413470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.435364 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.462958 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.501514 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.527237 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.561700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.588060 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.617991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.638805 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.669052 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.703660 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.737464 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.769636 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.792265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.816383 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.840276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.861606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.891682 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.917418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.949418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.975285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:58 crc kubenswrapper[4125]: I0312 13:29:58.998987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:29:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.025740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.025943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.025486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026414 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.026783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.026947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.027669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.027755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028038 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.028975 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.029012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.028980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.029554 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.029637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029891 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.029950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.030465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.030695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:29:59 crc kubenswrapper[4125]: I0312 13:29:59.030779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030888 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.030956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.031010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.031062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:29:59 crc kubenswrapper[4125]: E0312 13:29:59.069936 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:00 crc kubenswrapper[4125]: I0312 13:30:00.030005 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:30:00 crc kubenswrapper[4125]: E0312 13:30:00.031007 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025624 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.025652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.025182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.026088 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.026198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.026387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.026429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.026477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.026995 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.027016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.027435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.027641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.027949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.028049 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.028322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.028472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.028540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.028574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.028742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.028921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.028999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.029312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.029492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029570 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.029700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.029737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.030077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.030231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.030319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.030553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.030696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.030965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.031291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.031507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.031615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.031749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.032090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.032292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.032371 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.032531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.032713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.033038 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.033275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.033491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.033677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.034203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.034366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.034312 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.034438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.034509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.034587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.035003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.035007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.035251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.035421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.035688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036035 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036448 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:01 crc kubenswrapper[4125]: I0312 13:30:01.036510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.036740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.037082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.037307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.037422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.037597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.037733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.038042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:01 crc kubenswrapper[4125]: E0312 13:30:01.038315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.056950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.092922 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.124050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.158358 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to 
decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.205724 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.235283 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.274971 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.307598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.339199 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.372372 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.414662 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.439227 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.478686 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.513305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.553354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.595603 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.624743 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.654243 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.682524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.706016 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.747019 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.777058 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.812339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.830407 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.857473 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.874568 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.888656 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.906950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.922474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.941196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.957993 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.974567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:02 crc kubenswrapper[4125]: I0312 13:30:02.995080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.014486 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.024960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.025165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.025205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.025400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.025496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.025508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.025653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.025757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.025772 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.025979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.026168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.026304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.027294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.027492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.027624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.027783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027884 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.027954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028084 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.028159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.028246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028414 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.028544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028580 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.028668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.028729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.029013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.029088 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.029285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029396 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.029415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.029567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.030067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.030172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.030176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.030222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.030238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.030327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.030706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.030858 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.031010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.031271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.031507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.031580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.031744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.032311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.032434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.032578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.032609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.032714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.032923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033046 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.033982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:03 crc kubenswrapper[4125]: E0312 13:30:03.034995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.041246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.063291 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.084232 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.108759 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.126371 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.149983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.174885 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.203789 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.225712 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.248209 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.286779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.314246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.347564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.375439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.405922 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.428907 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.455649 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.477503 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.498685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.522343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.559166 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.585147 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.613560 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.645261 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.669631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.694298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.717697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.750376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.774170 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.794017 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.830357 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.857198 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:03 crc kubenswrapper[4125]: I0312 13:30:03.879266 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:04 crc kubenswrapper[4125]: E0312 13:30:04.072147 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.372404 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.372704 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.372755 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.372802 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.373011 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:04Z","lastTransitionTime":"2026-03-12T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:04 crc kubenswrapper[4125]: E0312 13:30:04.414741 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.432758 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.433016 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.433061 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.433271 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.433332 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:04Z","lastTransitionTime":"2026-03-12T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:04 crc kubenswrapper[4125]: E0312 13:30:04.461774 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.470964 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.471036 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.471056 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.471082 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:04 crc kubenswrapper[4125]: I0312 13:30:04.471157 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:04Z","lastTransitionTime":"2026-03-12T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
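The Ready=False condition above is driven entirely by the missing CNI network configuration. As a quick check of the state the message describes, a minimal Go sketch — not the kubelet's actual implementation; the directory path is taken verbatim from the log message, and the .conf/.conflist/.json suffixes are the conventional CNI config extensions, assumed here:

```go
// Sketch only: reproduce the check implied by "No CNI configuration file
// in /etc/kubernetes/cni/net.d/". An empty result corresponds to the
// NetworkReady=false condition the kubelet keeps reporting above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Path taken verbatim from the kubelet log message.
	confDir := "/etc/kubernetes/cni/net.d"
	var configs []string
	// Conventional CNI config suffixes (assumption; not kubelet source).
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			fmt.Fprintln(os.Stderr, "glob:", err)
			os.Exit(1)
		}
		configs = append(configs, matches...)
	}
	if len(configs) == 0 {
		// The state the log reports: the network plugin is not ready,
		// so the node stays NotReady and pod sandboxes cannot start.
		fmt.Println("no CNI configuration found in", confDir)
		return
	}
	fmt.Println("CNI configs:", configs)
}
```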
Mar 12 13:30:04 crc kubenswrapper[4125]: E0312 13:30:04.565805 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025368 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025570 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.026159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026051 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.025942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026452 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.026533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.026773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.026788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.026453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.027075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.027142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.027364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.027604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.027699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.027990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.028221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.028278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.028279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.027750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.028445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.028358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.028375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.028437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.028479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.028588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.028373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.027723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.028703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.028982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029022 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029120 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:05 crc kubenswrapper[4125]: I0312 13:30:05.029300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.029468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.029585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.029798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.030010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.030357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.030582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.030798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.031344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.031460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.031630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.031802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.032419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.032793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.033288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.034074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.034412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.034621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.034979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.035271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.035502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.035777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.036293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.036362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.036474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.036613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.036711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.036955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.037903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:05 crc kubenswrapper[4125]: E0312 13:30:05.038236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026333 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.026597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.026966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.027046 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.027054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.026692 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.027310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.027349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.027316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.027485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.027750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.027937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.028050 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.028277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.028362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.028392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.028459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.028643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.028741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.028944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.028953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.029146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.029253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.029281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.029158 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.029397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.029409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.029525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.029621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.029751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.030026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.030037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.030240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.030274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.030346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.030360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.030396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.030645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.030701 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.030955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.031198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.031374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.031443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.031525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.031637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.031784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.032031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.032197 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.032330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.032465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.032518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.032600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.032711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.032770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.033065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.033244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.033387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.033463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.033549 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.033658 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.033730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.033790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.033749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.034241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.034287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.034384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.034460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.034585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.034682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.034732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.035333 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.035507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.035652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.035911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.036930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.037033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.037171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:07 crc kubenswrapper[4125]: I0312 13:30:07.037958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:07 crc kubenswrapper[4125]: E0312 13:30:07.038210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.026229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.026367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.026243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.026722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.026779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.026801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.026990 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.027251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.027506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.027573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.027644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.027657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.027735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.027757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.027993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028021 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.028390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.028785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.028980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.029058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.029327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029571 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.029598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.029602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.029755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.031387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.031665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.031508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.032576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.033324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.033441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.033705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.033962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.034213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.034265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.033984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.034065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.034547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.034187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.034661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.034666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.034702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.034796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.034736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.035018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.035070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.035380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.035678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.035744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.036357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.036648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.037234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.037240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.037336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.037399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.037476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.037641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.037948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.037962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.038220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.038285 4125 scope.go:117] "RemoveContainer" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.039005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.039306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.039386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.039469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.039573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.039739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.040459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.040551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.040655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.040673 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.040994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.041048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.041307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.042776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.043055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.043482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.043627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.043721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.043805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.044015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.044261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:09 crc kubenswrapper[4125]: E0312 13:30:09.075155 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.229189 4125 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 12 13:30:09 crc kubenswrapper[4125]: I0312 13:30:09.229486 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.025361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.025477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.025763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.025806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.025973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.026262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.026308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.026456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.026589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.026601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.026695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.026963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.026969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.027151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.027312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.027315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.027513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.027630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.027653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.027713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.027985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.025433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.028200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.028334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.028338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.028475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.028717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.028981 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.029193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.029271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.029367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.029685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.029756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.029996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.030487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.030742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.031189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.031336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.031409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.031482 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.031492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.031752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.031751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.031994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.032296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.032364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.032362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.032395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.032510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.032521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.032637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.032674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.032514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.033213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.033433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.033459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.033557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.033997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.034041 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.034329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.034536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.034598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.035209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.035338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.035560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.035788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.036212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.036314 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.036441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.036505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.036631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.036733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.036971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.037230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.037290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.037382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.037527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.037604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.037770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.038083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.038274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.038411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.038445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.038767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.039284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.039461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.039667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:11 crc kubenswrapper[4125]: I0312 13:30:11.039703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.040066 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.040247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.040446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.040757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.041275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.041282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.041747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:11 crc kubenswrapper[4125]: E0312 13:30:11.041996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.059296 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] 
sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.107465 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.144355 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.188701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.224435 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.258540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.320367 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.353003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.385460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.425326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.462168 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.492956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.530496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.567635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.594070 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.629188 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.659008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.688349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.735443 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.765038 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.802018 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.835806 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.859236 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.888774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.922721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.946189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:12 crc kubenswrapper[4125]: I0312 13:30:12.975782 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.006136 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.025904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.026173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.026440 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.026590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.027192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.027399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.027644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.027723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.027743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.027655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.028200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.027422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.028483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.028937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.029176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.029488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.029768 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.029781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.029999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.030986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031173 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031280 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.031762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.031902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.032040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.032506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.032689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.033020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.033062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.033341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.033358 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.033510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.033517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.033633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.033736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.033912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.034358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.034422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.034526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.034533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.034717 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.034965 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.034987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.035662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.035983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.036430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.036968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.037025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.037571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.037748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.038970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.039049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.039192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.039336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.039487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.043360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.045143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.045260 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: E0312 13:30:13.052662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.079134 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.105230 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.129150 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.152749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.177564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.196199 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.222126 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.243708 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.267354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.288407 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.316139 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.337570 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.354575 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.379031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.414714 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.447968 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.483757 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.505139 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.523877 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.540945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.560021 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.577420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.599013 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.616632 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.642642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.672007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.695538 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.714747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.738282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.763582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.783691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.811631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.837283 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.860642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.884320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.920185 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.946053 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:13 crc kubenswrapper[4125]: I0312 13:30:13.971687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:14 crc kubenswrapper[4125]: E0312 13:30:14.079381 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.593263 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.593406 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.593440 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.593477 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.593525 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:14Z","lastTransitionTime":"2026-03-12T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Mar 12 13:30:14 crc kubenswrapper[4125]: E0312 13:30:14.623191 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.633186 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.633356 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.633386 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.633425 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.633461 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:14Z","lastTransitionTime":"2026-03-12T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:14 crc kubenswrapper[4125]: E0312 13:30:14.666187 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.756744 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.757062 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.757355 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.757444 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:14 crc kubenswrapper[4125]: I0312 13:30:14.757497 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:14Z","lastTransitionTime":"2026-03-12T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:14 crc kubenswrapper[4125]: E0312 13:30:14.785736 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:14 crc kubenswrapper[4125]: E0312 13:30:14.786063 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.027015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.027049 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026542 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.026329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.027793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.029377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.029765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.030139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.030930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.030944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.031216 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.031219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.031272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.031294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.031583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.031680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.032029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.032244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.032317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.032383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.032610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.033024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.033049 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.033572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.033578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.033715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.034191 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.034407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.034428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.034468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.034504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.033374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.035211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.035293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.035363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.035462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.035604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.036791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.037027 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.037180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.037213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.037300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.037384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.037432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.037569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.037660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.037676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.037990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.038235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.038374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.038575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.038730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.035630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.035718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.036080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.036312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.036335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.036345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.036615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.036716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.039207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:15 crc kubenswrapper[4125]: I0312 13:30:15.039270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.039444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.039575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.040656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.041078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.041260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.041454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.041602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.041755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.041982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.042186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.042300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.042454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.042644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.042805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:15 crc kubenswrapper[4125]: E0312 13:30:15.043030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025716 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.025745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025880 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.025986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.026392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.026496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026685 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.026691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.026783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.027011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.027196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.027257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.027118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.027647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.027616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.027740 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.027785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.027933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.027965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028872 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028067 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.027976 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.029084 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.029256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.028314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.028432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028470 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.029589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028469 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.029683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.029791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.028527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.028558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.028657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.028862 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.029530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.030291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.030443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.030591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.030648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.030770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.030966 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.030973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.031120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.031481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.031598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.031602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.031742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.031959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.032192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.032417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.032565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.032746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.033021 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.033231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.033441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.033518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.033906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.034156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.034245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:17 crc kubenswrapper[4125]: I0312 13:30:17.034284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.034406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.034555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.034748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.034965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.035059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.035234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:17 crc kubenswrapper[4125]: E0312 13:30:17.035431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.025920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.026710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026877 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.026924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.027033 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.027268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.027392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.027410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.027487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026289 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.027580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026328 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.027741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026381 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.027055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.026249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.028527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.028583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.028757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.028997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.029371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.029424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.029509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.029594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.029654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.029754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.029804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.029955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.030321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.030990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.031197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.031349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.031447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.031494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.031705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.031782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.032306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.032414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.032490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.032689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.032888 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:19 crc kubenswrapper[4125]: I0312 13:30:19.033379 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.033941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:19 crc kubenswrapper[4125]: E0312 13:30:19.081967 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.024717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.024767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.024980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.024950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025429 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.025438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025587 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.027480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025747 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.027605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:21 crc kubenswrapper[4125]: I0312 13:30:21.025816 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.027760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.028060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.028230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.028536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.028754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.029170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.029388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.029531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.029710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.030002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.030434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.030776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.031070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.031245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.031315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.031397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.031538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.031639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.032050 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.032312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.032499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.034450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.036551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.036970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037372 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.037774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.038032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:21 crc kubenswrapper[4125]: E0312 13:30:21.038217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.059952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.086669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.127143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\"
,\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.145392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.169186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.196509 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.216676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.239651 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.265239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.299416 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.325676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.353318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.383727 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.404203 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.423467 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.444876 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.461977 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.482930 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.503280 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.530350 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.554072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.583564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.615261 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.637045 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.661415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.682685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.697320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.719053 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.736915 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.753409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.774726 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.793621 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.809242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.830901 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.855052 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.873507 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.891475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.920209 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.951547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:22 crc kubenswrapper[4125]: I0312 13:30:22.980667 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.000189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:22Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.024895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.024996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.025044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.025149 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.025167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.025205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.025291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.025373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.025398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.026661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.026908 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.027288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.027685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.028103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.028221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.028243 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.028423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.029192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.030156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.030355 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.030369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.030420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.030512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.030573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.030698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.030762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.030956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.031110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.031281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.031315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.031500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.031699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.031787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.033273 4125 scope.go:117] "RemoveContainer" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.034982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.038289 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.038299 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.039426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.040464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.041339 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.041464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.041346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.041352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.041512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.041579 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.041614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.041696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.041714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.041729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.041785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.042183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.042524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.042606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.042761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.043485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.043602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.043740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.043920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.044039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.044342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.044431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.044509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.044649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.044705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.045399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.045520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.045604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.045731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.045792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.046442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.046582 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.046715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.046925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.047427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047886 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.047901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.047942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.047980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.048042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.048196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:23 crc kubenswrapper[4125]: E0312 13:30:23.048489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.059281 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.084690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.109206 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.128199 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.145303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.164004 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.178758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.197300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.218263 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.242211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.271525 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.297448 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.325308 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.368479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.403572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.428371 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.458286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.491315 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.520157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.567554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.600379 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.632224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.668521 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.699454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:23 crc kubenswrapper[4125]: I0312 13:30:23.723326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:24 crc kubenswrapper[4125]: E0312 13:30:24.083708 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.014491 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.015327 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.015472 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.015592 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.015693 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:25Z","lastTransitionTime":"2026-03-12T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.025393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.026520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.026577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.026683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.026727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.026874 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.026896 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.026970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.026943 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027692 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.027994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028023 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.027958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.028272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028321 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.028131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.028383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.028536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.028697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.028771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.028939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029025 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029072 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.029174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.029248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.029393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.029546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.029611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029662 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.029765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.029997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030444 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.030797 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.030974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.031964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.032117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.032172 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.032241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.032402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.032523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.032631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.039400 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:25Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.045109 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.045169 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.045190 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.045217 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.045245 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:25Z","lastTransitionTime":"2026-03-12T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.067658 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:25Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.073579 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.073892 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.074446 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.074574 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.074694 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:25Z","lastTransitionTime":"2026-03-12T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.101329 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:25Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.108319 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.108379 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.108400 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.108424 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.108451 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:25Z","lastTransitionTime":"2026-03-12T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.126981 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:25Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.135361 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.135399 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.135412 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.135439 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:25 crc kubenswrapper[4125]: I0312 13:30:25.135474 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:25Z","lastTransitionTime":"2026-03-12T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.155309 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:25Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:25 crc kubenswrapper[4125]: E0312 13:30:25.155429 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:30:26 crc kubenswrapper[4125]: I0312 13:30:26.028959 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:30:26 crc kubenswrapper[4125]: E0312 13:30:26.030037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
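The retry loop above fails for a single reason: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 serves a certificate whose NotAfter (2024-12-26T00:46:02Z) is long past the node's clock (2026-03-12). The following is a minimal Go sketch (an editor's illustration, not part of the captured log; it assumes the endpoint is still listening on the logged address) that reproduces the validity comparison behind the "x509: certificate has expired or is not yet valid" error:

// certcheck.go - editor's sketch, not part of the captured log.
// Dials the webhook endpoint from the log entries above and prints
// the serving certificate's validity window.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// InsecureSkipVerify lets us inspect an already-expired chain;
	// the address is the one the kubelet logged (assumed still listening).
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	// The same comparison the TLS verifier performs: now must fall
	// inside [NotBefore, NotAfter], otherwise verification fails.
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s expired=%v\n",
		cert.NotBefore.UTC().Format(time.RFC3339),
		cert.NotAfter.UTC().Format(time.RFC3339),
		now.Format(time.RFC3339),
		now.After(cert.NotAfter))
}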
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.028303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026905 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
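Every NotReady condition and "Error syncing pod" entry in this stretch traces back to the same check: the container runtime found no CNI configuration file in /etc/kubernetes/cni/net.d/, so it reports NetworkReady=false and the kubelet refuses to sync pods that need pod networking. A rough Go sketch (an editor's illustration; it approximates, rather than reproduces, the runtime's readiness probe, and the directory path is taken verbatim from the log) of that directory scan:

// cnicheck.go - editor's sketch, not part of the captured log.
// Approximates the network-readiness test behind the repeated
// "No CNI configuration file in /etc/kubernetes/cni/net.d/" message:
// the runtime wants at least one CNI config file in that directory.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path taken verbatim from the log above
	var found []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, _ := filepath.Glob(filepath.Join(dir, pat))
		found = append(found, m...)
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration files; network plugin not ready")
		os.Exit(1)
	}
	fmt.Println("CNI configs:", found)
}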
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.028761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.026997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027016 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027063 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027200 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.029228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.027354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.029533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.028581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.029014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.029347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.029653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:27 crc kubenswrapper[4125]: I0312 13:30:27.029690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.030161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.030320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.030461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.030567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.030729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.030945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.031320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.031510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.031703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.031777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.031805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.032148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.032399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.032594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.033906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.034026 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.034271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.034446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.034934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.034674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.035921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.036153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.036227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.036423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.036526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.036668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.036885 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:27 crc kubenswrapper[4125]: E0312 13:30:27.037038 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.025616 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.025706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.026148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.025640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.026582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.026777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.026150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.027211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.027332 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.027419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.027452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.027509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.027688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.027705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028337 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.028345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028030 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.028766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.028618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.030178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.030622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.030910 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.031237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.031373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.031650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.032232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.032450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.033249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.034057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.034488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.034712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.035118 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.035611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.037013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.037507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.037725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.038459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.039052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.039258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.040180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.040536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.040726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.041216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.041611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.041800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.043020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.043212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.043598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.043804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.044397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.044742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.045039 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.045298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.045431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.045611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.045733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.046062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.046221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.047217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.047417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.047637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.047714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.047670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.048124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.048217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.048586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.048715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.049029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.049335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.049446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.049949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.050463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.051188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:29 crc kubenswrapper[4125]: I0312 13:30:29.051305 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.051405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.051720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.052359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.055278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:29 crc kubenswrapper[4125]: E0312 13:30:29.086771 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.025204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.025620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.026244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.026664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.026712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.026995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.027037 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.027301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.027435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.027644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.027683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.027729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.027780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028022 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.028146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028199 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.028307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.028487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.028655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.028946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.028957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.029302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.029574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.029796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.030018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030048 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.030269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.030462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.030700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.030918 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.030993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.031285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031368 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.031415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.031578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.031734 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.031973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.032038 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.032275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.032304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.032527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.032779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.033141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.034688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.035150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.035464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.035741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.036005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.036308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.036448 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.036592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.036719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.036936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.037260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.037427 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.037558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.037612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.037770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.038030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:31 crc kubenswrapper[4125]: E0312 13:30:31.039154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.421496 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.421698 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.421761 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.421971 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:30:31 crc kubenswrapper[4125]: I0312 13:30:31.422040 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.057025 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.078268 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.105539 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.131445 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.151159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.169287 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.193646 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.224738 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.243538 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.261927 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.280222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.299866 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.319378 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.344883 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.358542 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.396370 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.412795 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.432788 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.450461 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.467229 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.485351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.499415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.515949 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.531925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.565489 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.575742 4125 generic.go:334] "Generic (PLEG): container finished" podID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerID="a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec" exitCode=0 Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.575958 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerDied","Data":"a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec"} Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.576014 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a"} Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.592627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.613650 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.634482 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.655006 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.677159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.702692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.722692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.742803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.760456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.775725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.795456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.812539 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.837664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.862046 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.883653 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.888777 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.889708 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:32 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.889929 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.919204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.939752 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.957982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.977694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:32 crc kubenswrapper[4125]: I0312 13:30:32.996060 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.017269 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.025677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025711 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.025953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025961 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.025988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026116 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026298 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.026761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.026956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.027126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.027280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.027420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027520 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.027613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027872 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.027876 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.027923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.028275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.028453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.028908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.028965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.029052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.029154 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.029250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.029332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.029440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.029635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.029784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.030008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.030135 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.030567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.030721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.030912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.030963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031026 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.031059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:33 crc kubenswrapper[4125]: E0312 13:30:33.031887 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.049990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.068334 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.088790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.108612 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.130114 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.156979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.176688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.197750 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.217496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.245015 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.287005 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.314846 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.331243 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.349293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.367444 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.383277 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.401482 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.418343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.436566 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.452410 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.468885 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.484231 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.509237 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.526435 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.544207 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.561026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.582998 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.601686 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.620573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.641290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.664316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.690171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.711435 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.726792 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.741602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.757761 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.777629 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.796023 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.815470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.837707 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.867024 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.885888 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.889326 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:33 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.889473 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.903701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.922734 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig 
--namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.939345 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.957154 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:33 crc kubenswrapper[4125]: I0312 13:30:33.983209 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.017479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.041260 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.058700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.078957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: E0312 13:30:34.088530 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
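Every "Failed to update status for pod" entry above fails for the same reason: the kubelet's status patch must pass the pod.network-node-identity.openshift.io admission webhook, and the Post to https://127.0.0.1:9743/pod is aborted during TLS verification because the webhook's serving certificate expired on 2024-12-26T00:46:02Z while the node clock reads 2026-03-12. The error string comes from Go's crypto/x509 chain verification; below is a minimal, self-contained sketch (not the kubelet's or the webhook's own code, and the CommonName is invented) that reproduces the exact message using the dates from the log.

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "crypto/x509"
        "crypto/x509/pkix"
        "fmt"
        "math/big"
        "time"
    )

    func main() {
        // Self-signed certificate whose validity window ends when the
        // webhook serving cert in the log did: 2024-12-26T00:46:02Z.
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        tmpl := &x509.Certificate{
            SerialNumber:          big.NewInt(1),
            Subject:               pkix.Name{CommonName: "network-node-identity"}, // invented CN
            NotBefore:             time.Date(2024, time.September, 26, 0, 46, 2, 0, time.UTC),
            NotAfter:              time.Date(2024, time.December, 26, 0, 46, 2, 0, time.UTC),
            IsCA:                  true,
            BasicConstraintsValid: true,
            KeyUsage:              x509.KeyUsageCertSign,
        }
        der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
        if err != nil {
            panic(err)
        }
        cert, err := x509.ParseCertificate(der)
        if err != nil {
            panic(err)
        }
        roots := x509.NewCertPool()
        roots.AddCert(cert)
        // Evaluate the chain at the wall-clock time the kubelet reported.
        _, err = cert.Verify(x509.VerifyOptions{
            Roots:       roots,
            CurrentTime: time.Date(2026, time.March, 12, 13, 30, 33, 0, time.UTC),
        })
        fmt.Println(err)
        // x509: certificate has expired or is not yet valid: current time
        // 2026-03-12T13:30:33Z is after 2024-12-26T00:46:02Z
    }

The nested failures quoted inside the container logs above come from the same verification path: cluster-policy-controller and kube-apiserver-check-endpoints crash against kube-apiserver certificates that expired 2025-06-26T12:46:59Z and 2025-06-26T12:47:18Z, which in turn drives the CrashLoopBackOff waits ("back-off 5m0s" is the kubelet's restart-backoff cap).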
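The patch bodies in these entries are hard to read because they are quoted twice: the status patch JSON is embedded as a quoted string inside the error message, and klog then quotes the whole err value again. Two rounds of strconv.Unquote recover plain JSON. A sketch under those assumptions follows (the uid is copied from the first entry in this section; the "..." stands for the trimmed remainder of the line).

    package main

    import (
        "encoding/json"
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // Trimmed copy of one err="..." value from the log, with the
        // backslash escaping exactly as it appears between the quotes.
        raw := `"failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"}}\" for pod ..."`

        // Round one undoes klog's quoting of the whole err value.
        outer, err := strconv.Unquote(raw)
        if err != nil {
            panic(err)
        }
        // The patch sits between the first and last remaining double quote.
        lo, hi := strings.Index(outer, `"`), strings.LastIndex(outer, `"`)
        // Round two undoes the quoting of the patch string itself.
        inner, err := strconv.Unquote(outer[lo : hi+1])
        if err != nil {
            panic(err)
        }
        var patch map[string]any
        if err := json.Unmarshal([]byte(inner), &patch); err != nil {
            panic(err)
        }
        fmt.Println(patch["metadata"]) // map[uid:34a48baf-1bee-4921-8bb2-9b7320e76f79]
    }

Once decoded, each body is an ordinary strategic-merge patch; the $setElementOrder/conditions key seen throughout is the directive recording the intended ordering of the conditions list being patched.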
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.100671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.122633 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.154892 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.176320 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.197488 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.219567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.241009 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.277301 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.306937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.324436 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.368975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.422464 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.442896 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.488459 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.523401 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.566272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.607272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.649407 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.686224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.728054 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.767159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.812754 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.863250 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.888451 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:34 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:34 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.888631 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.898434 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.925183 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:34 crc kubenswrapper[4125]: I0312 13:30:34.968924 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:34Z is after 2024-12-26T00:46:02Z"
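[editor's note] Every status patch above fails the same way: the kube-apiserver cannot call the pod.network-node-identity.openshift.io webhook because its serving certificate expired 2024-12-26 while the node clock reads 2026-03-12. The following is a minimal stdlib-only Go sketch (not kubelet or apiserver code) of the validity-window test behind "x509: certificate has expired or is not yet valid"; the endpoint 127.0.0.1:9743 is taken from the log, and InsecureSkipVerify is used only so the peer certificate can be fetched and inspected despite failing verification.

	// certcheck.go - inspect a TLS endpoint's certificate validity window.
	package main

	import (
		"crypto/tls"
		"fmt"
		"log"
		"time"
	)

	func main() {
		// Skip built-in verification so we can read the cert ourselves.
		conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		cert := conn.ConnectionState().PeerCertificates[0]
		now := time.Now()
		fmt.Printf("NotBefore: %s\nNotAfter:  %s\nNow:       %s\n",
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			now.Format(time.RFC3339))

		// The same ordering checks the Go x509 verifier applies.
		switch {
		case now.Before(cert.NotBefore):
			fmt.Println("certificate is not yet valid")
		case now.After(cert.NotAfter):
			fmt.Println("certificate has expired") // the case seen throughout this log
		default:
			fmt.Println("certificate is within its validity window")
		}
	}

Run against the webhook endpoint, this would print a NotAfter of 2024-12-26T00:46:02Z and report expiry, matching the "current time ... is after ..." suffix on every failed patch.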
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.022424 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.025904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.025958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.026671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.026678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.027064 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.027328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.027621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.027725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.029521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.029600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.029650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.029765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.030194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.030315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.030236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.030797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.031131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.031730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.032258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.032562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.032652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.032923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.033731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.034404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.034670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.034700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.035181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.035366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.035441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.035690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.035709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.036019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.036293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.036482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.036686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.037165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.037365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.038476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.039042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.039378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.039656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.040190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.040407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.040501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.040673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.040925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.041166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.041357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.041455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.041549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.041707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.041997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.042198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.042390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.042555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.042693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.043292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.043501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.043656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.043914 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.044279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.044991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.045766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.044766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.046134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.046155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.046357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.046476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.046683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.047321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.047522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.047661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.047696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.051459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.058616 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.082628 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.124403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.166934 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.203501 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.244944 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.285324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.322332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.361806 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.410129 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.562947 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.563051 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.563147 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.563191 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.563237 4125 setters.go:574] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:35Z","lastTransitionTime":"2026-03-12T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.605699 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z"
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.734372 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.734528 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.734707 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.734755 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.734895 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:35Z","lastTransitionTime":"2026-03-12T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.769423 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:35Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:35 crc kubenswrapper[4125]: E0312 13:30:35.769493 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.888235 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:35 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:35 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:35 crc kubenswrapper[4125]: I0312 13:30:35.889476 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.029501 4125 scope.go:117] "RemoveContainer" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.606930 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/3.log"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.616116 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/2.log"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.616905 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/1.log"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.616977 4125 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0" exitCode=1
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.617020 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0"}
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.617104 4125 scope.go:117] "RemoveContainer" containerID="88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.617739 4125 scope.go:117] "RemoveContainer" containerID="4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0"
Mar 12 13:30:36 crc kubenswrapper[4125]: E0312 13:30:36.618338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.649934 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.672480 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.695390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.714663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.733925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.751243 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.787737 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.808055 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.834624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.852772 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.867172 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.888574 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:36 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:36 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.888663 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.901857 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.920798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.938595 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.959045 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.975778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:36 crc kubenswrapper[4125]: I0312 13:30:36.997676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.015571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.025603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
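The status-patch failures above all trace back to one cause: the serving certificate for the pod.network-node-identity.openshift.io webhook expired on 2024-12-26, while the node clock reads 2026-03-12, so every TLS handshake to https://127.0.0.1:9743 is rejected before the patch is even attempted. The following is a minimal Go sketch of the validity-window check that produces this class of error, using only the standard library; the cert.pem path is illustrative and not taken from this log.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Path is illustrative; point it at the webhook's serving certificate.
	data, err := os.ReadFile("cert.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// The same NotBefore/NotAfter window comparison that yields
	// "x509: certificate has expired or is not yet valid".
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}

Run against the webhook's serving certificate, this would print the same "current time X is after Y" diagnostic seen in the entries above; rotating the certificate (or correcting a skewed clock) is what clears it.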
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.025769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.025943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.025589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026027 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.026031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026050 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026134 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026147 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.026146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.026230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.026368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026532 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.026546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.026902 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.026955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
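Interleaved with the webhook failures, the pod_workers.go entries above all report the same readiness gate: the CRI runtime holds NetworkReady=false because /etc/kubernetes/cni/net.d/ contains no CNI configuration yet, so every pod sync is skipped until the network plugin writes one. A small Go sketch of that kind of directory probe follows; the extension filter (.conf, .conflist, .json) is an assumption modeled on common CNI config loaders, not something this log confirms.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the log entries; adjust per cluster.
	confDir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", confDir, err)
		return
	}
	var configs []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// Extension filter is an assumption modeled on common CNI loaders.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			configs = append(configs, e.Name())
		}
	}
	if len(configs) == 0 {
		// Corresponds to the "No CNI configuration file" condition above.
		fmt.Println("no CNI configuration files found; network plugin not ready")
		return
	}
	fmt.Printf("found CNI configs: %v\n", configs)
}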
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.027162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.027275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.027322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.027645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.027725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027891 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.027963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.028286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.028423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.028655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.028753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028886 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.028968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.029156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029355 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029859 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.029977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030053 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030372 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:30:37 crc kubenswrapper[4125]: E0312 13:30:37.030506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.039097 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.057008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.077176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.100990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.131475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.152475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.188376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
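
Each failed record quotes the strategic-merge patch the kubelet tried to send; the $setElementOrder/conditions key carries the intended ordering of the conditions list through the merge. The payloads are hard to read through the log escaping, so here is a small helper sketch, assuming one level of log quoting has already been stripped and using a trimmed sample body rather than a full patch from the log:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "strconv"
    )

    func main() {
        // Trimmed sample of an escaped patch body as it appears inside a
        // kubelet log line (one quoting level already removed for brevity).
        quoted := `"{\"metadata\":{\"uid\":\"13045510-8717-4a71-ade4-be95a76440a7\"},\"status\":{\"podIP\":null}}"`
        raw, err := strconv.Unquote(quoted) // undo the remaining %q escaping
        if err != nil {
            panic(err)
        }
        var pretty bytes.Buffer
        if err := json.Indent(&pretty, []byte(raw), "", "  "); err != nil {
            panic(err)
        }
        fmt.Println(pretty.String())
    }
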
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.222146 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.246886 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.271647 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.301554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c4
7358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.332182 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.355925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.375559 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.401196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.424263 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.446364 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.470969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.495014 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.564991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.596751 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.622559 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/2.log" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.623029 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
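
The cluster-policy-controller crash quoted earlier begins with a shell guard that waits up to three minutes for port 10357 to be released (timeout 3m ... ss -Htanop '(' sport = 10357 ')') before exec'ing the controller. An approximate Go equivalent, assuming a bind test is an acceptable stand-in for the ss socket query (the semantics are close but not identical):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        deadline := time.Now().Add(3 * time.Minute) // mirrors the 3m timeout
        for {
            ln, err := net.Listen("tcp", "127.0.0.1:10357")
            if err == nil {
                ln.Close()
                fmt.Println("port 10357 is free")
                return
            }
            if time.Now().After(deadline) {
                fmt.Println("timed out waiting for port 10357")
                return
            }
            time.Sleep(time.Second) // retry cadence of the shell loop
        }
    }
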
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.626345 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/3.log" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.631263 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360"} Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.633538 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.655989 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.681625 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
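
The "back-off 40s" here (ovnkube-controller, restartCount 3) and the "back-off 5m0s" earlier (kube-apiserver-check-endpoints, restartCount 6) are consistent with the kubelet's doubling restart back-off, commonly a 10s base capped at 5m. The constants below mirror those defaults and should be treated as assumptions, not a quote of the implementation:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const (
            base = 10 * time.Second // assumed kubelet default
            max  = 5 * time.Minute  // assumed cap (MaxContainerBackOff)
        )
        d := base
        for restart := 1; restart <= 7; restart++ {
            fmt.Printf("restart %d: back-off %s\n", restart, d)
            d *= 2
            if d > max {
                d = max
            }
        }
        // Prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s — matching the
        // 40s at restart 3 and the 5m0s ceiling seen above.
    }
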
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.699270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.723136 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.739044 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.755697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.773225 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.790013 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.805547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.822779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.845876 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.863420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.880731 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.885574 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:37 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:37 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.885895 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.900477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:37 crc kubenswrapper[4125]: I0312 13:30:37.980512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:37Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.005906 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.035231 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.054253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.070363 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.089457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.108192 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.137283 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.153585 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.173739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.192105 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.208770 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.233335 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.255140 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.282988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.297968 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.314020 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.333688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.354027 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.372666 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.403710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.444438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.490681 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.521424 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.572957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.606022 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.636790 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/4.log" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.638124 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/3.log" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.642573 4125 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" exitCode=1 Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.642641 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360"} Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.642954 4125 scope.go:117] "RemoveContainer" containerID="117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.644269 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:30:38 crc kubenswrapper[4125]: E0312 13:30:38.644868 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.659235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.681908 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.721172 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.763022 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.806208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.845794 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.884405 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.887430 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.889274 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:38 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:38 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.889445 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.932204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:38 crc kubenswrapper[4125]: I0312 13:30:38.968468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.004721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:38Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.024898 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.028551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.029254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.029980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025198 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025277 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.030930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025375 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.031925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025451 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.032672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025448 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.033297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.034016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.034188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.034422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025503 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.034962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.035244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.035407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.027985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.029721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.035529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.035545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.035687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.035800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.036087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.036179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.036280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.036376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.036590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.036949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.037170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.037441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.037717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.037985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.038251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.038399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.038628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.038953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.039253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.039418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.039521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.048778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.087208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.093354 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.119339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"stat
e\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.206215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.222270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.227676 4125 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.227795 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.246804 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.278474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.322655 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.360895 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.411207 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.456364 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://117d19f6844b87e5fb8f4b0d8b429ab1610ad6eccb60d31090d4f1c8659720d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:44Z\\\",\\\"message\\\":\\\"objects of type *v1.EgressFirewall\\\\nI0312 13:29:44.479148 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPPod\\\\nI0312 13:29:44.479173 17386 egressqos.go:302] Shutting down EgressQoS controller\\\\nI0312 13:29:44.479212 17386 admin_network_policy_controller.go:299] Shutting down controller default-network-controller\\\\nI0312 13:29:44.479213 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressIPNamespace\\\\nI0312 13:29:44.479185 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *factory.egressFwNode\\\\nI0312 13:29:44.479225 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.Pod\\\\nI0312 13:29:44.479191 17386 egressservice_zone.go:261] Shutting down Egress Services controller\\\\nI0312 13:29:44.479231 17386 obj_retry.go:432] Stop channel got triggered: will stop retrying failed objects of type *v1.NetworkPolicy\\\\nI0312 13:29:44.479311 17386 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:29:44.479405 17386 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:29:44.479444 17386 metrics.go:552] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI0312 13:29:44.479449 17386 reflector.go:295] Stopping\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.485131 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.523480 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.573321 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.605362 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.649472 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/4.log" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.652220 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.655684 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:30:39 crc kubenswrapper[4125]: E0312 13:30:39.656308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.680944 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.723408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.762458 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.806527 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.853726 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.880489 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.885242 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:39 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:39 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.885383 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.921385 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.959985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:39 crc kubenswrapper[4125]: I0312 13:30:39.998861 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:39Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.027276 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:30:40 crc kubenswrapper[4125]: E0312 13:30:40.027678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.042950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.090999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.120038 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.159205 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.202359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.242898 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.289153 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.331866 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.363179 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.414408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.444676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.487447 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.530624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.570316 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.611602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.654257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.689731 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.735319 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.779994 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.816494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T
13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD 
controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.849663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.889184 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:40 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:40 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.889347 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.890960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.931234 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented 
the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:40 crc kubenswrapper[4125]: I0312 13:30:40.983194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.020115 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.025762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.026196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.026474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.026710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.026951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.027131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.027333 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.027693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.027728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.028155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.028239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.028161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.028354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.028406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.028433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.028536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.028372 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.028544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.028561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.025320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.028878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.029205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.029311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.029420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.030293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.030299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.029459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.029664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.029711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.029766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.029892 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.029930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.029960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.029986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.030218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.029517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.030519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.030559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.030911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.031204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.031409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.031484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.031744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.031996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.032048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.032213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.032276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.032412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.032545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.032620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.032788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.033015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.033207 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.033358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.033365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.033419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.033659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.033935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.034151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.034250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.034407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.034613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.034797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.035180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.035309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.035511 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.035677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.036013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.036141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:41 crc kubenswrapper[4125]: E0312 13:30:41.036299 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.049733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.049733 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
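[Editor's note] From here on, every status patch in this section fails identically: the apiserver's call to the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 is rejected because the served certificate expired on 2024-12-26T00:46:02Z while the node clock reads 2026-03-12. A hedged diagnostic sketch in Go (the address comes from the log; this is one way to inspect the certificate, not part of any OpenShift tooling) that pulls the certificate off that port and prints its validity window:

// certcheck.go - dial the webhook endpoint from the log and report the
// served certificate's NotBefore/NotAfter window. Verification is skipped
// deliberately so an already-expired certificate can still be inspected.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // webhook endpoint taken from the log entries
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if now.After(cert.NotAfter) {
		// Matches the kubelet error: "current time ... is after 2024-12-26T00:46:02Z".
		fmt.Println("certificate has expired")
	}
}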
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.086569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.122748 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.170343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.213624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.252603 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.292169 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.331481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.365725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.406004 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.459332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.486410 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z"
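[Editor's note] The payload each of these entries failed to deliver is a strategic-merge patch against pod status, logged as a doubly escaped string. The $setElementOrder/conditions key is the strategic-merge-patch directive that pins the ordering of the conditions list when the patch is applied. A small sketch for reading these payloads (the patch below is a shortened, hypothetical stand-in for the much larger ones in the log, after shell-level unescaping):

// patchdump.go - pretty-print a status patch like the ones embedded above.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Shortened, hypothetical example of a strategic-merge status patch.
	patch := `{"metadata":{"uid":"33b7f421-18ed-4980-bd54-2fec77176e75"},` +
		`"status":{"$setElementOrder/conditions":[{"type":"Ready"}],` +
		`"conditions":[{"type":"Ready","status":"True"}]}}`

	var out bytes.Buffer
	if err := json.Indent(&out, []byte(patch), "", "  "); err != nil {
		fmt.Println("not valid JSON:", err)
		return
	}
	fmt.Println(out.String())
}

Note that the patches for already-running pods (network-node-identity, ovnkube-control-plane, node-resolver, cluster-version-operator) are also rejected: the webhook failure blocks all status updates, not just those for the pods stuck in ContainerCreating.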
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.562176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.612606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.647922 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.696229 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.728904 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.778450 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.809133 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.850615 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.889432 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.890415 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.892870 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.923731 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:41 crc kubenswrapper[4125]: I0312 13:30:41.969095 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:41Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.015185 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.049286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.084787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.124737 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.171332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.215369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.250125 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.285315 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.330324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.364788 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.443518 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.475656 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.512264 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.552306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.585981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.622573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.652996 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.697148 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.729789 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.776500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.811397 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.849630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.887390 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.887595 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.892497 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.926659 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:42 crc kubenswrapper[4125]: I0312 13:30:42.978520 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.008044 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026346 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026725 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.026988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027016 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027050 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027111 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027152 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.027319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.031690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.032161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.032411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.032653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.032978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.032981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.033364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.033499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.033615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.034049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.034219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.034354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.034361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.034602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.034759 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.035201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.036109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.036578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.037151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.037265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.037460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.037782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.038231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.038378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.038508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.038788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.039205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.039316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.039514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.039651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.040018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.040418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.040624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.041366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.041450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.041520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.041604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.041917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.042183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.042250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.042389 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.042775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.043183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.043316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.043498 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.043657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.043980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.044360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:43 crc kubenswrapper[4125]: E0312 13:30:43.044496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.057685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\
\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.096418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.
io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.133997 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.179475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.209027 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.245526 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.289650 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.332683 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.369991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.410454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.445493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.484241 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.529711 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.574309 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.610325 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.653582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.694750 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.726649 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.770945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.815282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.850276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.888501 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.888743 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.898591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.935320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:43 crc kubenswrapper[4125]: I0312 13:30:43.973621 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.000912 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.043083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.081526 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: E0312 13:30:44.095620 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
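[Editor's aside: every "Failed to update status for pod" entry above and below fails the same way — the kubelet's status patch is rejected because the pod.network-node-identity.openshift.io webhook presents a serving certificate whose NotAfter (2024-12-26T00:46:02Z) is long past the node clock (2026-03-12). The "x509: certificate has expired or is not yet valid" message comes from Go's standard crypto/x509 validity window check. A minimal standalone sketch of that check follows; the certificate path is hypothetical — point it at the webhook's actual serving cert (or any PEM cert) to reproduce the comparison the TLS handshake performs.]

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; substitute the webhook's PEM-encoded serving certificate.
        data, err := os.ReadFile("/etc/kubernetes/certs/webhook-serving.crt")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Fprintln(os.Stderr, "no PEM block found")
            os.Exit(1)
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        now := time.Now()
        // Same validity-window comparison that yields
        // "x509: certificate has expired or is not yet valid" in the log entries.
        if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
            fmt.Printf("certificate invalid: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
            os.Exit(1)
        }
        fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
    }

[Because the failing endpoint is the admission webhook itself, every status patch from the kubelet is rejected regardless of pod health — which is why healthy pods (machine-approver, machine-config-server) fail identically to crash-looping ones. End of aside; the log continues below.]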
Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.124657 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.159798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.212969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.245347 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.282105 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.333403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.360501 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.412122 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.473585 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.490785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.525483 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.561383 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.598493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.636192 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.685234 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.723114 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.763337 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.813420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.847780 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.883734 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.888147 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld 
Mar 12 13:30:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:44 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:44 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.888332 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.929042 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:44 crc kubenswrapper[4125]: I0312 13:30:44.963347 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:44Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.017386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.025961 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026301 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026410 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026902 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.026949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.027742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.027199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.027330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.027964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.028016 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.028035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.027550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.027606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.026173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.028296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.028586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.028645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.028700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.028745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.028787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.028798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.028918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.028988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.029002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.029271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.029366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.029379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.029377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.029413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.029426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.029640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.030022 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.030321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.030487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.030722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.030806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.030997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.031761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.031896 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.031973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.032024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.032158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.032158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.032339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.032623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.032955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.032995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.033218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.033394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.033541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.033682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.034739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.035039 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.035438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.035614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.035789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.036132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.053602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.081739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.149190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.173715 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.200177 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc 
kubenswrapper[4125]: I0312 13:30:45.243267 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.277245 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.325767 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.366540 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.401597 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.443090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.491439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.524721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.564719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.602708 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.647984 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.691109 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.725484 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.768339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.813383 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.834798 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.834987 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.835021 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.835120 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.835171 4125 setters.go:574] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:45Z","lastTransitionTime":"2026-03-12T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.852506 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.864728 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.873131 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.873217 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.873240 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.873326 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.873375 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:45Z","lastTransitionTime":"2026-03-12T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.886378 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.886492 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.890365 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.896803 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.903971 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.904167 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.904208 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.904248 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.904297 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:45Z","lastTransitionTime":"2026-03-12T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.924394 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
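Every failed patch above dies at the same TLS step: the network-node-identity webhook on 127.0.0.1:9743 serves a certificate whose validity window ended 2024-12-26T00:46:02Z, while the node clock reads 2026-03-12T13:30:45Z. A minimal Go sketch of that check, assuming only that the endpoint is reachable from the node; the program and its output format are illustrative, not part of the kubelet or any OpenShift tooling:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Connect without chain verification so we can inspect the certificate
	// the webhook actually serves, even though it is expired.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s\n",
			cert.Subject,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339))
		if now.After(cert.NotAfter) {
			// Mirrors the kubelet error: "current time ... is after ..."
			fmt.Printf("EXPIRED: current time %s is after %s\n",
				now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
		}
	}
}
```

InsecureSkipVerify is deliberate here: normal verification would abort with the same x509 error the kubelet logs, before the certificate's validity window could be printed.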
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.930626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.932462 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.932596 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.932630 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.932665 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.932701 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:45Z","lastTransitionTime":"2026-03-12T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.954573 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.961417 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.961535 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.961573 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.961612 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.961662 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:45Z","lastTransitionTime":"2026-03-12T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:30:45 crc kubenswrapper[4125]: I0312 13:30:45.971754 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.982467 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:45Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:45 crc kubenswrapper[4125]: E0312 13:30:45.982516 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:30:46 crc kubenswrapper[4125]: I0312 13:30:46.011691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88adc00978181250cb6e8c3aeeb95f7c6c9a301f3c0f5567a7bd2d48328b4b7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:29:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:28:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b\\\\n2026-03-12T13:28:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dbde7fff-6ab3-4f7f-9cc6-f0982300347b to /host/opt/cni/bin/\\\\n2026-03-12T13:28:51Z [verbose] multus-daemon started\\\\n2026-03-12T13:28:51Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:29:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:46 crc kubenswrapper[4125]: I0312 13:30:46.042141 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:46 crc kubenswrapper[4125]: I0312 13:30:46.080315 4125 
status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:46 crc kubenswrapper[4125]: I0312 13:30:46.130728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:46Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:46 crc kubenswrapper[4125]: I0312 13:30:46.888349 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:46 crc kubenswrapper[4125]: I0312 13:30:46.888569 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.026169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.026894 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.027015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027229 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027538 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027672 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028017 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.027738 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028165 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.028369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.028952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.029221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.029394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.029737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.030199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.030444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.030757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.031798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.032177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.032332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.032461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.032595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.032691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.032713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.033135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.033241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.033322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.033470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.035530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.035977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.036217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.036381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.036803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.037221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.037384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.037556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.037712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.038015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.038806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.039588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.039761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.039600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.040362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.040765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.041397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.042027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.042421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.043217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.043491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.043676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:47 crc kubenswrapper[4125]: E0312 13:30:47.044010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.888974 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:47 crc kubenswrapper[4125]: I0312 13:30:47.889218 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:48 crc kubenswrapper[4125]: I0312 13:30:48.887350 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:48 crc kubenswrapper[4125]: I0312 13:30:48.887568 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.025130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.025595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.025611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026183 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026328 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.026590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.026643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.026982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.027236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.027258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.027305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.027502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.027516 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.027632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.027642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.027707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.027970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.028307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.028607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.028753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.028947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.029200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.029254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.029406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.029780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.030195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.030341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.030390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.030397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.030474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.029794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.030662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.030946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.030946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.031278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.031331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.031427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.031534 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.031429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.031640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.031666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.031661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.031790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.032031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.032151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.032214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.032249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.032380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.031921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.032532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.032594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.032717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.032919 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.033043 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.033213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.033212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.033484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.033635 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.034408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.034700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.035012 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.035175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.035219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.035551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.035683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.035696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.035937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.036034 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.036170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.036471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.036562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.036624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.036700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.037007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.037127 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.037211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.037129 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.037310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.037414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.037555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.037693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.037962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.038307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.038482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.038597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.038722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:49 crc kubenswrapper[4125]: E0312 13:30:49.098487 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.887521 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:49 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:49 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:49 crc kubenswrapper[4125]: I0312 13:30:49.887717 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:50 crc kubenswrapper[4125]: I0312 13:30:50.887319 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:50 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:50 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:50 crc kubenswrapper[4125]: I0312 13:30:50.887478 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.025419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.025595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.025686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.025789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025889 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.025936 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.026025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026476 4125 scope.go:117] "RemoveContainer" containerID="4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.026639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.026745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.026946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.026992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.027165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027566 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.027483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.028111 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.028196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.028230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.028257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027796 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.028715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.028804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.028732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.027912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.027867 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.028758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029891 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.029939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.029999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.030002 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.030194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.030474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.030534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.030608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.030774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.030958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.031120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.031461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.031590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.031659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.031683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.031595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.031929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.032158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.032750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:51 crc kubenswrapper[4125]: E0312 13:30:51.033789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.050482 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.070301 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.091735 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.120223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.148308 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.168025 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.187140 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.207174 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.234303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.262423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.285622 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.307718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.338471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.369632 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.398800 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.421904 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.445313 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.471130 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.500322 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.525576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.552649 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.582751 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.605965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.631767 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.654285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.676596 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
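
The recurring lastState blocks all carry exitCode 137 with reason ContainerStatusUnknown: the kubelet could not locate the old container after restart and records the conventional 128+signal exit encoding for SIGKILL. The decoding convention, as a one-screen sketch:

package main

import "fmt"

func main() {
	// POSIX convention: an exit status above 128 means 128 + signal number,
	// so 137 decodes to signal 9 (SIGKILL).
	exitCode := 137
	if exitCode > 128 {
		fmt.Printf("exit %d => killed by signal %d (9 = SIGKILL)\n", exitCode, exitCode-128)
	}
}
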
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.700490 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.726441 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.749751 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.784892 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.805969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.828303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.850534 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.872890 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.885731 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:51 crc 
kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.885895 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.902631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
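
The router failure just above is the kubelet's standard HTTP probe rule at work: any response code in [200, 400) counts as success, so the 500 returned by the router's healthz handler (body "[-]backend-http failed: reason withheld") fails the startup probe. A sketch of that rule; the probed URL below is an assumption for illustration, since the log shows only the failure output, not the target.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func probe(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 10*1024))
	// The same acceptance window the kubelet applies to HTTP probes.
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("HTTP probe failed with statuscode: %d, start-of-body: %s", resp.StatusCode, body)
}

func main() {
	// Hypothetical router healthz endpoint, for illustration only.
	if err := probe("http://127.0.0.1:1936/healthz"); err != nil {
		fmt.Println(err)
	}
}
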
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.925691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.951510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:51 crc kubenswrapper[4125]: I0312 13:30:51.989883 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:51Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.011990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
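
The kube-apiserver-check-endpoints container above has reached CrashLoopBackOff at the 5m0s ceiling after six restarts. Assuming upstream kubelet defaults (10s initial delay, doubled per crash, capped at 5m — an assumption about defaults, not something this log states), the schedule would look like:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed kubelet defaults: 10s base, doubled per restart, 5m cap.
	delay := 10 * time.Second
	const maxDelay = 5 * time.Minute
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: back-off %s\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
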
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.040033 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.060789 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.083537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.116550 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.144306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.170169 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.214740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.239723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.271569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.306175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.338945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.375663 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.424602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.457577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.482766 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.514178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.531362 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.551006 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.573954 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.591494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.620169 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.639443 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.664104 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.691737 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.716169 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.743690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.767985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.792314 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.826370 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.854664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.884147 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.887679 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.887967 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.904359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\
\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.926744 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: 
context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.955514 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:52 crc kubenswrapper[4125]: I0312 13:30:52.978648 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.009371 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.026386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.026571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.026753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.027191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.027658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.028016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028093 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.028368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.028648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028917 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.028983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.029083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029310 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.029324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.029494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029922 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.029799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.030332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.030427 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.030536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.030643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.030682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.030729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.030764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.030915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.031192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.031234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.031476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.031544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.031582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.031685 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.031743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.031970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.032258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.032524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.032805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.032928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.033175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.033214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.033305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.033499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.033558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.033672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.033689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.033723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.033799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.033991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.034492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.034642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.034970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.035075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.035168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.035218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.035323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.035424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.035754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.035923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036869 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.036972 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:53 crc kubenswrapper[4125]: E0312 13:30:53.037092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.043252 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.063313 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.082580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.108502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.135915 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.157971 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.180435 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.200900 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.236671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.259977 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.285611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.324009 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.347983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.380562 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.411255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.445155 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.479234 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.505610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.535322 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.569596 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.594451 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.625081 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.656694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.682582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.706794 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.731087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.755464 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.788446 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.811761 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.833377 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.857256 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.882487 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.887156 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:53 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:53 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.887268 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.905959 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.932619 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.958995 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:53 crc kubenswrapper[4125]: I0312 13:30:53.990981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.012167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.028384 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:30:54 crc kubenswrapper[4125]: E0312 13:30:54.030010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.042098 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.075974 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: E0312 13:30:54.101150 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.103210 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.129392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.149025 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.175239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.194581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.219710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.245090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.262108 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.283119 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.301672 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.331422 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.354114 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.383691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.415618 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.437473 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.461168 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.519380 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.540684 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.569590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.590613 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:30:54Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.888345 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:54 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:54 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:54 crc kubenswrapper[4125]: I0312 13:30:54.889178 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.026673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.026804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.027112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.027187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.027284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.027315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.027512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.027575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.027672 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.026693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.027993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.026763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.028231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.028472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.028668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.028918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.028958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029109 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.029314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.029485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.029722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029905 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.029991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.030090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.030262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.030450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.030638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.030772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.031092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.031337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.031404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.031446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.031488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.031530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.031750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.032399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.032465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.032613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.032983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.033693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.034278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.034503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.034728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.034917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.035945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.036951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.037210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:30:55 crc kubenswrapper[4125]: E0312 13:30:55.037386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.887207 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:55 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:55 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:55 crc kubenswrapper[4125]: I0312 13:30:55.887980 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.302109 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.302317 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.302354 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.302391 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.302437 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:56Z","lastTransitionTime":"2026-03-12T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Mar 12 13:30:56 crc kubenswrapper[4125]: E0312 13:30:56.333192 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:30:56Z is after 2024-12-26T00:46:02Z"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.343589 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.343725 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.343767 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.343962 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.344109 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:30:56Z","lastTransitionTime":"2026-03-12T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:30:56 crc kubenswrapper[4125]: E0312 13:30:56.512796 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.887947 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:30:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:30:56 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:30:56 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:30:56 crc kubenswrapper[4125]: I0312 13:30:56.888224 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.025606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.025688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.025739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.025692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.025648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.026112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.026225 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026280 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.026379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.025633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.026709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.026733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.026958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.026965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.027254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.027339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.027431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027504 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.027622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.027760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.027798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.028072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.027937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.028141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.028414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.028743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.028799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.029327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.029426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.029454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.029362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.029391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.029763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.029791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.029940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.029985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.030090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.030359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.030428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.030475 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.030553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.030562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.030591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.030705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.031286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.031343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.031347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.031756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.031895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.031903 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.031986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.032070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.032516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.032683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.032796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.032900 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.033028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.033150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.033176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.034962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.035245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.035393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.035539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:57 crc kubenswrapper[4125]: E0312 13:30:57.035679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.888323 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:57 crc kubenswrapper[4125]: I0312 13:30:57.888550 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:58 crc kubenswrapper[4125]: I0312 13:30:58.887531 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:58 crc kubenswrapper[4125]: I0312 13:30:58.887729 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.024945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.024991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025378 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.025419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.025661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.025507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026206 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.026297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.026561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026893 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.026921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.026964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.027427 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.027467 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.027586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.027695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.027697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.027732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.028124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.028426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.028553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.028737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.028779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.028790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.029019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.029144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.029076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.029314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.029421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.029459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.029581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.029611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.029724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.030003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.030100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.030135 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.030117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.030172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.030190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.030228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.030373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.033479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.033788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.034009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.034229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.034389 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.034596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.034736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.035184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.035335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.035503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.035638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.036011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.036273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.036463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.036596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.036795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.037029 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.037309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.037500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.037637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.037983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.038169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.038313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.038476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.038629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.038783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.039180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.039327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.039524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.039652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:30:59 crc kubenswrapper[4125]: E0312 13:30:59.105520 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.889969 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:30:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:30:59 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:30:59 crc kubenswrapper[4125]: healthz check failed Mar 12 13:30:59 crc kubenswrapper[4125]: I0312 13:30:59.892099 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:00 crc kubenswrapper[4125]: I0312 13:31:00.887118 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:00 crc kubenswrapper[4125]: I0312 13:31:00.887288 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025471 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.025674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.026088 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.025347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026230 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.026232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.026115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.026743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.026797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.027109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027281 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.027386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.027623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.027987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028023 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.027989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028118 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.028434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.028507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.028651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.028771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028876 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.028954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.029178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.029301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.029385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.029416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.029512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.029553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.029989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.030268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.030429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.030630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.030689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.030716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.030890 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.030966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.031383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.031464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.031652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.031916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032901 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.032952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.033007 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.033021 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.033229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.033403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.033537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.033629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.033945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.034181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.034195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.034249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.034566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:01 crc kubenswrapper[4125]: E0312 13:31:01.034638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.888695 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:01 crc kubenswrapper[4125]: I0312 13:31:01.889006 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.044771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.074590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.102512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.142145 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.175695 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.212991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.250783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.274378 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.302123 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.336987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.360635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.394865 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.418775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.443160 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.468361 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.488023 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.517180 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.534725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.566722 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.592474 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.618561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.642453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.670775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.701290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.728386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.757980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.786342 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.824235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.858075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.885206 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.887572 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.888396 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.913268 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.940311 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:02 crc kubenswrapper[4125]: I0312 13:31:02.974089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.005691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026135 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026602 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.027700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.027883 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.028273 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.028373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.028433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026889 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026955 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.028939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.029114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.029130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.029233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027066 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.029393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.029459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.027525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.026696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.029603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.029930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.030014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.030093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.030208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.030316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.030367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.030488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.030606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.030615 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.030689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.030780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.031011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.031689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.031221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.031437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.031461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.027222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.034205 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.038171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.038433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.038614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.038798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.039135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.039312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.039472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.039696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.040939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.041288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.041510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.041708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.042423 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.042502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.043000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.043339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:03 crc kubenswrapper[4125]: E0312 13:31:03.043506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.050698 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.079388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.110909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.153746 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.181118 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.211602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.237358 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.260116 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.298591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.329889 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.354942 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.380623 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.404403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.428534 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.453226 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.485159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.520357 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.552986 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.588285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.618636 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.644796 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.672235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.701433 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.724940 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.751566 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.775238 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.805580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.838740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.879727 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.888415 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.888513 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.909265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 
12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.940803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.966783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:03 crc kubenswrapper[4125]: I0312 13:31:03.982637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:31:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:04 crc kubenswrapper[4125]: E0312 13:31:04.107802 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:31:04 crc kubenswrapper[4125]: I0312 13:31:04.887784 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:04 crc kubenswrapper[4125]: I0312 13:31:04.889464 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.026321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025796 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.026795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.025974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026099 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.026628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.027384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027440 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.027564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027537 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.027171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.027748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.027994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028101 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028242 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.028277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.028007 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.028526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.028726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.028736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.029014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.029081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.029191 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.029370 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.029494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.029598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.029690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.029798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.030120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.030153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.030265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.030278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.030512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.030497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.030679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.030788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.031076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.031232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.031384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.031501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.031589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.032109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.032152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.032187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.032358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.032509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.032675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.032985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.033393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.033517 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.033636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.033703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.033979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.034106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.034170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.034316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.034461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.034520 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.034657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.034721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.035351 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.035395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.035570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.035699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.035915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.036001 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.036224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.036353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.036595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.036902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.036982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.037156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.037303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:05 crc kubenswrapper[4125]: E0312 13:31:05.037445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.887364 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:05 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:05 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:05 crc kubenswrapper[4125]: I0312 13:31:05.887544 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.026985 4125 scope.go:117] "RemoveContainer" containerID="4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.558615 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.558781 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.558806 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.558904 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.558937 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:06Z","lastTransitionTime":"2026-03-12T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:31:06 crc kubenswrapper[4125]: E0312 13:31:06.578418 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.585963 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.586145 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.586185 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.586222 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.586271 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:06Z","lastTransitionTime":"2026-03-12T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:06 crc kubenswrapper[4125]: E0312 13:31:06.610252 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.618222 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.618370 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.618405 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.618444 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.618490 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:06Z","lastTransitionTime":"2026-03-12T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:06 crc kubenswrapper[4125]: E0312 13:31:06.646310 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.658613 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.658787 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.658936 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.658988 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.659110 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:06Z","lastTransitionTime":"2026-03-12T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:06 crc kubenswrapper[4125]: E0312 13:31:06.696746 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.707407 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.707562 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.707600 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.707640 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.707685 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:06Z","lastTransitionTime":"2026-03-12T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:06 crc kubenswrapper[4125]: E0312 13:31:06.741940 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: E0312 13:31:06.742140 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.835127 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/2.log"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.835276 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45"}
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.882987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.886473 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:06 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:06 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.886566 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.909180 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.934212 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.955948 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.980156 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:06 crc kubenswrapper[4125]: I0312 13:31:06.999476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.017598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026471 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026511 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.026486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.026664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026878 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.026912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.026906 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.027733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.027913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.028077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.028354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.028481 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.028593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028872 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.028971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.029069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029122 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.029194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.029209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.029373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.029679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029853 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.029883 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030088 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.030130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030172 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.030288 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.030953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:07 crc kubenswrapper[4125]: E0312 13:31:07.031514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.045925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.061800 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.078564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.096413 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.113135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.126792 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.141666 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.158305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.175262 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.195219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.209505 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.226633 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.241430 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.258012 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.278382 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.295611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.320404 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.337201 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.362537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.387361 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.406735 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.429642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.449958 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.467282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.489984 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.512944 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.533343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.553391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.578161 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.596328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.614354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.644909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.676066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.702641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.727221 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.749498 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.773593 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.795501 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.818516 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.841749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.865428 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.883293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.887416 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.887607 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.903204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.925456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:07 crc kubenswrapper[4125]: I0312 13:31:07.955875 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.026432 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.054634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.082330 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.097490 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.114957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.134339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.151223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.172486 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.195614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.215415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.231713 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.252296 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.266167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.282592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.303257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.888086 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:08 crc kubenswrapper[4125]: I0312 13:31:08.888317 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.025583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.025796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.025944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.025975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026091 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026143 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.027429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.027572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.027703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.027936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.025629 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026605 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026642 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026672 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.026805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.027211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.028128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.028399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.028524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.028732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.028967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.029117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.029310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.029973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.030166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.030339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.030505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.030644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.030781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.031107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.031276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.031454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.031621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.031779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.032151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.032324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.032500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.032635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.033487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.033664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.034104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.034157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.034325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.033700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.034334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.034491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.034615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.034746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.035004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.035314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.035493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.035638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.036291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.037291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.037462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.037525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.037579 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.037648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.037775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.038107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:09 crc kubenswrapper[4125]: E0312 13:31:09.111711 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.228459 4125 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.228617 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.228679 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.230085 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.230498 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3" gracePeriod=600 Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.850984 4125 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3" exitCode=0 Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.851123 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3"} Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.851427 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" 
event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372"} Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.876461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.887491 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.887642 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.902354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.920871 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.938255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.956895 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:09 crc kubenswrapper[4125]: I0312 13:31:09.986963 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:09Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.007285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.027285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.045716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.062685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.085623 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.111673 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.131090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.156438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.175495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.194179 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.209913 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.231571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.248390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.266269 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.282008 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.298876 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.321579 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.342296 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.361052 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.379545 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.402604 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.440513 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.473263 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.519479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.553157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.577166 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.604349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.629443 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.657929 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.686803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.737233 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.764954 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.792409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.813062 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.836574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.867246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.886409 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.886515 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.902068 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.922738 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.948098 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.968461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:10 crc kubenswrapper[4125]: I0312 13:31:10.989985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:11 crc 
kubenswrapper[4125]: I0312 13:31:11.011239 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.025532 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.025629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.025735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.025986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.026110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.026784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.026892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.026785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.026953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.026800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027389 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.027784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.027987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.028172 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.028395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.028694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.028930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.028981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029066 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029331 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.029967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.029983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.030084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.030140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.030244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.030294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.030299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.030399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.030482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.030685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.031001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.031234 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.031357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.031493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.031553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.031629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.031760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.031890 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.031984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.032107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.032506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.032983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033389 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033877 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:11 crc kubenswrapper[4125]: E0312 13:31:11.033973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.052437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.069063 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.083929 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.104745 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.133193 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.155214 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.180735 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.207903 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.231386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.255272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.283795 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.306602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.326215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.346945 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.363244 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.382909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.413332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.440575 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.890737 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:11 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:11 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:11 crc kubenswrapper[4125]: I0312 13:31:11.891101 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.050777 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.085136 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.118071 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.149769 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.174441 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.192291 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.232253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.261588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.297790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.322310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.353324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.382278 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.416581 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.455265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.488928 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.542707 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.796452 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
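
The lastState blocks in these patches carry exitCode 137 with reason ContainerStatusUnknown, a synthetic status recorded when the kubelet can no longer locate a container that was running before the pod was deleted; 137 follows the usual 128+N convention for termination by signal N, here SIGKILL (9). A small illustrative decoder (the helper name is invented, not part of any Kubernetes API):

    package main

    import (
    	"fmt"
    	"syscall"
    )

    // signalFromExitCode applies the 128+N convention used for container
    // exit codes: 137 -> SIGKILL (9), 143 -> SIGTERM (15).
    func signalFromExitCode(code int) (syscall.Signal, bool) {
    	if code > 128 && code < 128+64 {
    		return syscall.Signal(code - 128), true
    	}
    	return 0, false
    }

    func main() {
    	if sig, ok := signalFromExitCode(137); ok {
    		fmt.Printf("exit code 137 = terminated by signal %d (%s)\n", sig, sig)
    	}
    }
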
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.827365 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.852103 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.877290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.886695 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:12 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:12 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.886777 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.895492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z"
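
The router's startup probe output above uses the aggregated healthz format: each [+] or [-] line is one named sub-check, and any failing sub-check turns the whole endpoint into an HTTP 500, which the kubelet records as a probe failure. A rough sketch of that check from the client side, with an illustrative endpoint (the router's real probe URL is not shown in the log); like the kubelet, it treats status codes outside 200-399 as failure:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    	"strings"
    	"time"
    )

    func main() {
    	client := &http.Client{Timeout: 5 * time.Second}
    	// Illustrative endpoint, standing in for the container's probe target.
    	resp, err := client.Get("http://127.0.0.1:1936/healthz")
    	if err != nil {
    		fmt.Println("probe failure:", err)
    		return
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	// HTTP probes count any status outside 200-399 as a failure.
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		fmt.Printf("HTTP probe failed with statuscode: %d\n", resp.StatusCode)
    		for _, line := range strings.Split(string(body), "\n") {
    			if strings.HasPrefix(line, "[-]") {
    				fmt.Println("failing sub-check:", line)
    			}
    		}
    	}
    }
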
Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.919100 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.947617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.967736 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:12 crc kubenswrapper[4125]: I0312 13:31:12.994790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.025644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.025706 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.025918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.025921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.025992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.026279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.026975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027058 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.027156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
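
The repeated "network is not ready" errors quote the runtime's readiness message: CNI is considered unready until a network configuration file appears in /etc/kubernetes/cni/net.d/. A minimal sketch of such a directory scan (the extension list follows common CNI conventions; this is not the runtime's actual implementation):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	const confDir = "/etc/kubernetes/cni/net.d"
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		fmt.Println("cannot read CNI conf dir:", err)
    		return
    	}
    	found := false
    	for _, e := range entries {
    		// Conventional CNI config extensions.
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			fmt.Println("CNI config present:", e.Name())
    			found = true
    		}
    	}
    	if !found {
    		fmt.Printf("No CNI configuration file in %s/. Has your network provider started?\n", confDir)
    	}
    }
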
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.027372 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027082 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.027588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.027703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.027777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.028089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.028229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.028337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.028369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.028546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.028760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.028910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.028969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.029149 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.028903 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.029284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.029633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.029691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.029999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.030155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.030210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.030277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.030330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.030398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.030453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.030493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.030549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.030700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.030760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.030804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.031077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.031278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.031575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.031657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.031775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.031900 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.032055 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.032726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032873 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.032936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.032972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.033198 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.033951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.034062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:13 crc kubenswrapper[4125]: E0312 13:31:13.034137 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.051803 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.065886 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.089314 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.104594 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.125792 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.146492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.168933 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.185785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.202364 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.223477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.240780 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.263423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.291317 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.324049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.366041 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.398622 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.420471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.436088 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.465305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.488544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.509771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.534222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.552748 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.576239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.596453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.623098 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.643915 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.670638 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.691661 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.714348 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.742237 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.762472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.788723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.813371 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.836456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.858605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.886693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.888616 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.888775 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.911936 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.942742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.965138 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:13 crc kubenswrapper[4125]: I0312 13:31:13.985349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:14 crc kubenswrapper[4125]: I0312 13:31:14.027645 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:31:14 crc kubenswrapper[4125]: E0312 13:31:14.028582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:31:14 crc kubenswrapper[4125]: E0312 13:31:14.113678 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:31:14 crc kubenswrapper[4125]: I0312 13:31:14.888949 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:14 crc kubenswrapper[4125]: I0312 13:31:14.889190 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.025748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.027058 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.027416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.025777 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.027796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.027967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.027930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026399 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.026069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028919 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.028954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029106 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.029545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.033249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.033365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.033553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.033702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.034118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.034232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.034459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.034709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.035148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.035633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.035652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.035804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.036178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.036354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.036552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.036776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.036997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.037185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.037346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.037486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.037562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.037756 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.039259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.040943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041010 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:15 crc kubenswrapper[4125]: E0312 13:31:15.041968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.887404 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:15 crc kubenswrapper[4125]: I0312 13:31:15.887581 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.890510 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.890687 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.948295 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.948627 4125 kubelet_node_status.go:729] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.948667 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.948707 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.948753 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:16Z","lastTransitionTime":"2026-03-12T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:16 crc kubenswrapper[4125]: E0312 13:31:16.979416 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.989392 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.990098 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.990144 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.990182 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:16 crc kubenswrapper[4125]: I0312 13:31:16.990223 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:16Z","lastTransitionTime":"2026-03-12T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.020437 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.026168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.026540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.026576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.026693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.026770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.027209 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027508 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.027524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.027731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.027934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028080 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.028109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028331 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.028268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028496 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.028434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.028728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.028918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.026242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.029717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.029738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030148 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.030302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.030492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.030662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.030154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.030951 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.031086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.031312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.031557 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.031997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.031969 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.032249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.032273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.032529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.032670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.032725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.033205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.033343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.033417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.033482 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.033634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.034102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.034671 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.034729 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.034997 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.035134 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.035204 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:17Z","lastTransitionTime":"2026-03-12T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.035123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.036778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.038961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.039730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.040897 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.043341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.043553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.043575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.043649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.043651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.043712 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.043762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.044173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.044442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.044779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.045164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.045351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.045709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.045991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.047092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.047953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.049643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.050226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.051130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.051532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.052104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.053011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.064580 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.073147 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.073234 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.073255 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.073279 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.073304 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:17Z","lastTransitionTime":"2026-03-12T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:17 crc kubenswrapper[4125]: E0312 13:31:17.130136 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.887978 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:17 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:17 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:17 crc kubenswrapper[4125]: I0312 13:31:17.888242 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:18 crc kubenswrapper[4125]: I0312 13:31:18.888508 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:18 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:18 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:18 crc kubenswrapper[4125]: I0312 13:31:18.888620 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.025804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.025983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027118 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027120 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026448 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026456 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.027969 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.028084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.028150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.028160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.028171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.026156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.028612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.029162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.029492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.029547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.029732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.030075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.030268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.030422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.030552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.030767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.030981 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.031097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.031281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.031485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.031663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.031798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.032992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.033190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.033317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.033435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.033546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.034098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.034178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.034362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.034460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.034665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.035124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.035244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.035473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.035624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.036060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.036256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.036438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.036576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.036706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.037134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.037298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.037428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.037565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.037769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.038276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.038739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.039131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.039327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.039509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:19 crc kubenswrapper[4125]: E0312 13:31:19.116457 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.888258 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:19 crc kubenswrapper[4125]: I0312 13:31:19.888499 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:20 crc kubenswrapper[4125]: I0312 13:31:20.888761 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:20 crc kubenswrapper[4125]: I0312 13:31:20.889104 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.025169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.025539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.026329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
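The router-default startup-probe entries above include the first lines of the probe response body, which follow the aggregated healthz convention: one [+]/[-] line per named sub-check (backend-http, has-synced, process-running) and a closing "healthz check failed" summary returned with HTTP 500. An illustrative Go sketch of a handler emitting that shape; the sub-check names come from the log, but the handler itself is hypothetical and not the router's actual implementation:

package main

import (
	"fmt"
	"net/http"
)

// check is one named sub-check, mirroring the [+]/[-] lines in the probe body.
type check struct {
	name string
	ok   func() bool
}

// healthz aggregates sub-checks into the format seen in the probe output above.
func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, failed := "", false
		for _, c := range checks {
			if c.ok() {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			} else {
				// Details are typically withheld, as in "reason withheld".
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
				failed = true
			}
		}
		if failed {
			body += "healthz check failed\n"
			w.WriteHeader(http.StatusInternalServerError) // the 500 the kubelet probe saw
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	// Hypothetical sub-checks named after the ones in the log entries above.
	http.Handle("/healthz", healthz([]check{
		{"backend-http", func() bool { return false }},
		{"has-synced", func() bool { return false }},
		{"process-running", func() bool { return true }},
	}))
	http.ListenAndServe(":8080", nil)
}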
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.026687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.026998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.027247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027380 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.027383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.027598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.027790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.027804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028205 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.028219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.028352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.028619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.028959 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.029082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.028763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.029131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.029344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.029595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.029979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.030312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.030563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.030677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.030958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.031207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.031266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.031207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.031407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.031752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.032213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.032256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.032549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.032652 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.033104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.033125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.033179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.033403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.033436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.033580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.034130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.034180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.034379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.034477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.034700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.034800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.026132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.035446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.035959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.035965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.036124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.036334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.036500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.036665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.036777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.037102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.037221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.037352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.037649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.037784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.038072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.038327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.038484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.038674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.039234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.039290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.039408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:21 crc kubenswrapper[4125]: E0312 13:31:21.039561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.888516 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:21 crc kubenswrapper[4125]: I0312 13:31:21.889129 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.067203 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.112768 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.147657 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.182178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.205090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.236468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.271147 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.301139 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.332684 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.363131 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.388441 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.425805 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.462082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.485248 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.510571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.536463 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.562554 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.624094 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.654792 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.692207 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.713960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.730100 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.755658 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.774690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.797476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.818249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] 
check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.835716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.850544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.877351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.885622 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.886411 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.898412 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start 
--config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\
\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.919505 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.941721 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.962725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:22 crc kubenswrapper[4125]: I0312 13:31:22.993442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.023760 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.024940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.025242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.024961 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.025531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025768 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025857 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.025888 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.025940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.026084 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.026165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.026266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.026397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026454 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.026605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.026767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.026890 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.027098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.027308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027347 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.027504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.027641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.028107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.028459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.028690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.028989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.029268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.029375 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.029516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.029715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.030135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.030337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.030495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.030650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.030748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.030708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.030886 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.030895 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.031196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.031293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.031311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.031388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.031587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.031731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.032100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.032442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.032556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.032664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.033070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.033262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.033454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.033548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.034087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.034459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.034963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.035865 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.035994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.036111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.036142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.036196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.037587 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:23 crc kubenswrapper[4125]: E0312 13:31:23.037916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.050388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.072501 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.100718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.135157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.168536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.193569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.219092 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.241567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.261635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.286107 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.317509 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.336431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.360323 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.381101 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.405783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.434787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.459536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.483105 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.505664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.533152 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.555582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.577763 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.605983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.634081 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.660388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.685083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.714096 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.741756 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.773174 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.803595 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.840556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.871220 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:23Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.886521 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:23 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:23 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:23 crc kubenswrapper[4125]: I0312 13:31:23.886660 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:24 crc kubenswrapper[4125]: E0312 13:31:24.118759 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:31:24 crc kubenswrapper[4125]: I0312 13:31:24.887456 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:24 crc kubenswrapper[4125]: I0312 13:31:24.887553 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.025714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.026365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.026481 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.026670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.026687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.026798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.026896 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.026978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.026988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.027141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.027336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027339 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.027569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.027694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.027771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.027933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.028160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.028342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.028672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.028687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.028972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.028976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.029201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.029417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.029597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.029800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.029945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.030079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.030130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.030227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.030380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.030439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.030501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.030588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.030630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.030721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.030963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.031217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.031330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.031468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.031534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.031621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.031780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.032061 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.032098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.032357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.032509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.032679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.032939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.033094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.033134 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.033277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.033771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.034424 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.034361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.034997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.035094 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.035270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.035555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.035632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.035728 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.035956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.036152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.036233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.036334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.036412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.036486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.036750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.037173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.037374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:25 crc kubenswrapper[4125]: E0312 13:31:25.037569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.891412 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:25 crc kubenswrapper[4125]: I0312 13:31:25.891620 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:26 crc kubenswrapper[4125]: I0312 13:31:26.891510 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:26 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:26 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:26 crc kubenswrapper[4125]: I0312 13:31:26.891676 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025452 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025553 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.025761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.026042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.026173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.026366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026446 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.026620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.026894 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.027135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.027344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027261 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.027461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.027578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.027604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027875 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.027956 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028187 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028565 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028870 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.028950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.028798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.029070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.029109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.029246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.029368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.029418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.029489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.029529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.029539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.029736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.029747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.029791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.030149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.030909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.030942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.031473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.031594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.031625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.031648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.031950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.032110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.032210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.032788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.032892 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.032930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.033159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.033270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.033343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.033799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.033928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.249699 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.249938 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.249977 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.250076 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.250135 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:27Z","lastTransitionTime":"2026-03-12T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.290157 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:27Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.300701 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.301086 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.301131 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.301167 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.301219 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:27Z","lastTransitionTime":"2026-03-12T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.332726 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:27Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:27 crc kubenswrapper[4125]: E0312 13:31:27.456378 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.889224 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:27 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:27 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:27 crc kubenswrapper[4125]: I0312 13:31:27.889337 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:28 crc kubenswrapper[4125]: I0312 13:31:28.028576 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"
Mar 12 13:31:28 crc kubenswrapper[4125]: E0312 13:31:28.029561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe"
Mar 12 13:31:28 crc kubenswrapper[4125]: I0312 13:31:28.887913 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:28 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:28 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:28 crc kubenswrapper[4125]: I0312 13:31:28.888123 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.025597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.025960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.026493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.025677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.026395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.026951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.026995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.027142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.027180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.027285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.027388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.027537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.027560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.027930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.028156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.028389 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.028465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.028494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.028547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.028550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.028573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.029006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.029202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.029212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.029250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.029302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.029307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.029398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.029746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.029775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.030004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.030127 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.030238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.030289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.030544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.030695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.030715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.030994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.031390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.031579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.032118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.032350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.032434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.032637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.032969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.032990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.033095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.033188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.033379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.033651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.034000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.034268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.034565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.034674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.034804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.035184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.035583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.035593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.035791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.036221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.036461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.036432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.036519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.036571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.036655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.036987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.037245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.037422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.037564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.037680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.038062 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.038228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:29 crc kubenswrapper[4125]: E0312 13:31:29.123715 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.887417 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:29 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:29 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:29 crc kubenswrapper[4125]: I0312 13:31:29.887688 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:30 crc kubenswrapper[4125]: I0312 13:31:30.026243 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:31:30 crc kubenswrapper[4125]: E0312 13:31:30.026745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:31:30 crc kubenswrapper[4125]: I0312 13:31:30.888785 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:30 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:30 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:30 crc kubenswrapper[4125]: I0312 13:31:30.890708 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.025644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.025902 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026256 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.026135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026120 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.026463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.026672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.026754 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.027089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.027488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027650 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.027666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.027993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.028058 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.028086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.028133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.028308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.028329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.028680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.028784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.028983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.028998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.028333 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.029102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.029262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.029314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.029513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.029669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.029997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.030003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.030122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.030190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.030350 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.030516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.030580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.030622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.030667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.030715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.030729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.030770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.031582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.031748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.032131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.032447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.032571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.032566 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.032707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.032941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.033147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.033299 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.033462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.033906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.033980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.034147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.034164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.034284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.034315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.034701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.034972 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.035170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.035331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.035450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.035679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.035974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.036179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.036442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.036309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.036917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:31 crc kubenswrapper[4125]: E0312 13:31:31.037238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.423519 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.423688 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.423734 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.423797 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.424000 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.889445 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:31 crc kubenswrapper[4125]: I0312 13:31:31.889599 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.056981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.056981 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.089704 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.124279 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.167746 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.194249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.226408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.265664 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.326563 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.357593 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.391121 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.391121 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.425785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.458969 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.508251 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.554947 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.586783 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.615121 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.645925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.665784 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.734604 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.783185 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.823300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.854497 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.888390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.897649 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:32 crc 
kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.899558 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.921314 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.955702 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:32 crc kubenswrapper[4125]: I0312 13:31:32.986578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.014541 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.028173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.028696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.029542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.030164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.030690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.030729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.031932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.032073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.032219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.033415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.033445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.033532 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.034482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.035160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.035530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.035779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.036089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.036301 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.036326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.036487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.036496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.031320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.033682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.033965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.034349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.032310 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.033623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.035442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.035371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.040793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.041618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.041691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.041939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042134 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.043121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.043180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.043132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042203 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.043424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.043514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.043564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.042472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.042499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.043723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.042153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.043770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.044092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.044143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.045461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.045625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.045709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.045906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.046421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.046529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.046587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.046647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.046784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.046943 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.047143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.047387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.048627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.048761 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.048803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.049105 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049883 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.049954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.050282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.050439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.050710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.050998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.051091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.051246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.051426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.051575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.051713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.051953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.052174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.052453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.052676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.052724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.052928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.053356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:33 crc kubenswrapper[4125]: E0312 13:31:33.053798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.059111 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.077163 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.099951 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.122513 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.140611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4
ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.161707 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.182191 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.199681 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.216127 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.235258 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.253645 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.273704 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.292662 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.316762 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.336481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.356764 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.379653 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.402405 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.430634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.459445 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.484642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.505537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.523879 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.541800 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.562632 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.578521 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.602152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.621990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.646697 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.670339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.689952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.714594 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.736917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.760973 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.788483 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.810952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.839449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.867999 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.887158 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:33 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:33 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.888148 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:33 crc kubenswrapper[4125]: I0312 13:31:33.928063 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:34 crc kubenswrapper[4125]: E0312 13:31:34.126139 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:31:34 crc kubenswrapper[4125]: I0312 13:31:34.888503 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:34 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:34 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:34 crc kubenswrapper[4125]: I0312 13:31:34.888675 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.026998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027101 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027126 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.027324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.030526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.030621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.030982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031855 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031891 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031904 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.031944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.032100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.032120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.032287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.032543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.032573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.032648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.033999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.034221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.034251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.034368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.034718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.034929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.035160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.035280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.035384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.035467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.035735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.035987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.036286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.036336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.036490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.036593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.036678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.036742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.037193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.037480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:35 crc kubenswrapper[4125]: E0312 13:31:35.038548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.885648 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:35 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:35 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:35 crc kubenswrapper[4125]: I0312 13:31:35.886606 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.159758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.160520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.160771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.160992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.161224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.161335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.161496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.161685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.161903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.162210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.162561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.162626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.162704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.162864 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.162957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.163187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.163346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.163536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164104 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.164304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.165072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.165220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.165308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.165403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.164673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.165485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:31:36 crc kubenswrapper[4125]: E0312 13:31:36.165762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.888105 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:36 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:36 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:36 crc kubenswrapper[4125]: I0312 13:31:36.888332 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.026151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.027075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025695 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.027556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.028059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.028072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.026005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.026106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.028335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.025953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.028513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.028644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.028747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.029007 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.029142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.029239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.029370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.029433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.029523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.029639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.029708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.029797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.030101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.030173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.030263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.030373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.030428 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.030507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.030612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.030667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.030937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.031135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.031310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.031443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.031568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.031706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.031910 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.032003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.032199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.032261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.032342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.032445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.032495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.032582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.032685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.032748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.033000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.033073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.033366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.033497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.033550 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.033638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.033972 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.034070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.034481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.034584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.034778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.035112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.035336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.496974 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.497090 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.497118 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.497144 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.497172 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:37Z","lastTransitionTime":"2026-03-12T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.531677 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:37Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.544269 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.544406 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.544450 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.544498 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.544557 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:37Z","lastTransitionTime":"2026-03-12T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.569145 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:37Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.578591 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.578636 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.578658 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.578689 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.578721 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:37Z","lastTransitionTime":"2026-03-12T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.605713 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:37Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.614231 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.614371 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.614412 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.614453 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.614506 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:37Z","lastTransitionTime":"2026-03-12T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.631218 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.637221 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.637363 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.637399 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.637443 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.637492 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:37Z","lastTransitionTime":"2026-03-12T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
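Every node-status patch attempt above fails the same way: the kubelet reaches the network-node-identity webhook on 127.0.0.1:9743, but the serving certificate expired on 2024-12-26T00:46:02Z while the node clock reads 2026-03-12. A minimal Go sketch to confirm the certificate window from the node itself; the address comes from the failed Post in the error, and verification is skipped deliberately so the handshake completes and the expired certificate can be read:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Dial the webhook endpoint named in the kubelet error and print the
	// serving certificate's validity window.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("NotBefore:", cert.NotBefore)
	fmt.Println("NotAfter:", cert.NotAfter) // per the log, expect 2024-12-26T00:46:02Z
}

If NotAfter matches the date in the error, the remedy is certificate rotation (or correcting the node clock), not a networking change.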
Mar 12 13:31:37 crc kubenswrapper[4125]: E0312 13:31:37.657383 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.887453 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:37 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:37 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:37 crc kubenswrapper[4125]: I0312 13:31:37.887606 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.026472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.026772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.026978 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.027005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.025774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.027324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.027354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.027416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.027535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.027971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.028002 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.028282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.028698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.029179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.029629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.029946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.030124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.030251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.030446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.030663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.030992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.031218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.031413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.031618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.031768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:38 crc kubenswrapper[4125]: E0312 13:31:38.032614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.888122 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:38 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:38 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:38 crc kubenswrapper[4125]: I0312 13:31:38.888320 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.025796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.025966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.026120 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.025805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.025904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.026285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.026538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.026605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.026660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.027590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.028131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.028247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.028292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.028592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.028645 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.028804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.028984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.029124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.029151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.029204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.029285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.029296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.030318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.030376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.030514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.030887 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.030975 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.031127 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.031243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.031359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.031480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.031560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.032567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.032676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.033169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.033195 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.033462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.033787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.034179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.034255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.034296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.034392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.034254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.034521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.035385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.034994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.035695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.035758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.035897 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.036254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.036300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.036266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.036412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.036436 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.036590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.036711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.036977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.037171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.037263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.037337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.037895 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.037984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.038085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:39 crc kubenswrapper[4125]: E0312 13:31:39.127597 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.888208 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:39 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:39 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:39 crc kubenswrapper[4125]: I0312 13:31:39.888384 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.025991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.026106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.026362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.026681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.027266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.027540 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.027708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.028044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.028133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.028246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.028334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.028679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.027542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.029197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.029340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029413 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.029501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.029628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.029786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.029967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.030135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.030184 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.030358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.030559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.030670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.030932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.031113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.031399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:40 crc kubenswrapper[4125]: E0312 13:31:40.031500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.887324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:40 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:40 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:40 crc kubenswrapper[4125]: I0312 13:31:40.887528 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.028908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.029114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.029210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.030258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.030372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.030500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.031278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.030704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.031420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.030905 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.030955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.031642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.031678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.030985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.031429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.031464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.031504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.032272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.032597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.032684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.032699 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.032735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.032970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.033103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.034318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.034601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.034797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.035162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.035265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.035341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.035550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.035572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.035791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.036158 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.036343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.034472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.036556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.036618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.036765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.037007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.037256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.037321 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.037381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.037601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.037637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.037758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.038748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.038976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.039130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.039229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.039419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.039563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.039774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.040418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.040597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.040723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.040981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:41 crc kubenswrapper[4125]: E0312 13:31:41.041111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.888305 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:41 crc kubenswrapper[4125]: I0312 13:31:41.888407 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.025754 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.025924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.025998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.026179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.026213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.026296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.026560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.027005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.027302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.027460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.027516 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.027547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.027579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.025781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.027653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.027937 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.027965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.028200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.028263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.028595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.028724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.028731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.028913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.029004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.029163 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.029397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.030988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:42 crc kubenswrapper[4125]: E0312 13:31:42.031145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.054043 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.072462 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.093050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.122143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.149600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.173255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.198213 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.220642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.238104 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.261064 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.279336 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.299184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.316775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.341505 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.357354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.373762 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.390988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.406069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.423405 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.441611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.456980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.471594 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.486946 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.504876 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.521599 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.542096 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.558919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.579292 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.611458 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.631755 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.649637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.663804 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.686639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.747712 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.767963 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.782266 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.822069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.837139 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.852931 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.868403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.883717 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.885574 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.886594 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.905700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.928778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.955439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:42 crc kubenswrapper[4125]: I0312 13:31:42.992417 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.013298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:43Z"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.025719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.025988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.026069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.026113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026127 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.026397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.026582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.026799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.026880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.026950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.027102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.027220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.027303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.027408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.027461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.027592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.027633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.027688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.027774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.028088 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.028240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.028311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.028395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.028518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.028613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.028801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.029059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.029160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.029290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.029391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.029533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.029611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.029782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.030128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.030979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.031119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:31:43 crc kubenswrapper[4125]: E0312 13:31:43.031211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.037426 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.062362 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.086289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.116226 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.144696 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.164349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.189032 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.208466 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.226364 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.245894 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.263223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.279339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.294386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.310569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.327317 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.343746 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.360942 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.382646 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.402242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.416715 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.439418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.886753 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:43 crc kubenswrapper[4125]: I0312 13:31:43.886989 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.025416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.025740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.025791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.025984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.026400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.026447 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.026522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.026640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.026974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.027386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.027531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.027773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.028184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.028527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.029222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.029440 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.029582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.029736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.029922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.030117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.030152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.030314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.030396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.044947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.045798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.046426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.046732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.047302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.047666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.047942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.048114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.048343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:44 crc kubenswrapper[4125]: E0312 13:31:44.129784 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.888414 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:44 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:44 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:44 crc kubenswrapper[4125]: I0312 13:31:44.888631 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.024937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.026075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.026172 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.025503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025572 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.026499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.026543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025751 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.026973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.025785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.027175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.027270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.027540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.027781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.028155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.028331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.028608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.028664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.028795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.029190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.029379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.029527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.029655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.029926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.030149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.030327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.030450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.030591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.031192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.031213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.031370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.031488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.031618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.031791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.032247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.032383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:45 crc kubenswrapper[4125]: E0312 13:31:45.032517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.888147 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:45 crc kubenswrapper[4125]: I0312 13:31:45.888336 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029290 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.029609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.029970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029981 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.029632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.030173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.030296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.030329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.031575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.031703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.031793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.031707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.031962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.032061 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.032190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.032335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.032499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.032730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.032791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.033131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.033236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.033357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.033424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.033582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.033973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.034332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.034590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:46 crc kubenswrapper[4125]: E0312 13:31:46.035124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.888284 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:46 crc kubenswrapper[4125]: I0312 13:31:46.888464 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.025559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.025706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.025753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.025916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.025960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026101 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.026287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.026668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.026758 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.027115 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.027322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.027393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.027454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.027539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.027723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.027964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.028187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.028235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.028272 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.028353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.028513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.028953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.029274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.029350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.029657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.029763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.029982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.030124 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.030314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.030416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.030510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.030516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.031142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.030573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.030589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.030607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.030632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.030693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.030902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.031327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.031426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.031560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.031626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.031770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.032326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.032638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.032771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.033121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.033306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.033435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.033539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.033736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:47 crc kubenswrapper[4125]: E0312 13:31:47.034085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.887747 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:47 crc kubenswrapper[4125]: I0312 13:31:47.888099 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.025672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.025909 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.025987 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.026095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.026128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.026145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.026446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.026497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.026638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.026747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.026903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.026955 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027050 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.027224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.027436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.027793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.028773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.030134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.039472 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.039572 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.039602 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.039634 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.039670 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:48Z","lastTransitionTime":"2026-03-12T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.068318 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:48Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.076373 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.076488 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.076519 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.076560 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.076608 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:48Z","lastTransitionTime":"2026-03-12T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.104634 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.104761 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.104794 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.105082 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.105136 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:48Z","lastTransitionTime":"2026-03-12T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.145672 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.146084 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.146126 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.146169 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.146211 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:48Z","lastTransitionTime":"2026-03-12T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.186677 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.186779 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.186900 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.186953 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.186992 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:48Z","lastTransitionTime":"2026-03-12T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.221268 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.517930 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.518321 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.518578 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.518540039 +0000 UTC m=+800.841926388 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.724740 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.724985 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.725152 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.725211 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.725293 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91
nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.725262752 +0000 UTC m=+801.048648681 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.725382 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.725418 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.725533 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.725496439 +0000 UTC m=+801.048882798 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.725572 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.725556561 +0000 UTC m=+801.048942830 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.725770 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.725939 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.726062 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.726235 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.726335 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.726376 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.726378 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.726442 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.726420528 +0000 UTC m=+801.049806867 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.726477 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:33:50.726461349 +0000 UTC m=+801.049847678 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.726508 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.72649391 +0000 UTC m=+801.049880089 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.726697 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.726925 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727067 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.727150 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727163 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.727134059 +0000 UTC m=+801.050520408 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727249 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727346 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.727312395 +0000 UTC m=+801.050698863 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727395 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.727577 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727616 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.727583044 +0000 UTC m=+801.050969363 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727666 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727718 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.727703256 +0000 UTC m=+801.051089475 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.727991 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728135 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.72811046 +0000 UTC m=+801.051496689 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728343 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728400 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728472 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728522 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728528 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728571 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: 
\"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728600 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.728582104 +0000 UTC m=+801.051968443 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728660 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728709 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.728694019 +0000 UTC m=+801.052080248 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728761 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728775 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728916 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.728794772 +0000 UTC m=+801.052181111 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.728979 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.728985 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.729100 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.729133 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.729111781 +0000 UTC m=+801.052498130 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.729210 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.729265 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.729249216 +0000 UTC m=+801.052635445 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.729265 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.729327 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.729430 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.729544 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.729634 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730087 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730160 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730142624 +0000 UTC m=+801.053528863 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730195 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:33:50.730181955 +0000 UTC m=+801.053568124 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730258 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730307 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730294179 +0000 UTC m=+801.053680478 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730319 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730387 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730395 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730460 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730475 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730404 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.73038384 +0000 UTC m=+801.053770189 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730552 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730535714 +0000 UTC m=+801.053921893 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730582 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730567275 +0000 UTC m=+801.053953564 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730611 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730597166 +0000 UTC m=+801.053983565 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730640 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730625057 +0000 UTC m=+801.054011386 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730708 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.730797 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.730779552 +0000 UTC m=+801.054165581 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.730987 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.731169 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.731272 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.731507 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.731603 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.731584509 +0000 UTC m=+801.054970868 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.731992 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.731938529 +0000 UTC m=+801.055324828 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.732006 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.732215 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.732280 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.732424 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.732528 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.732502665 +0000 UTC m=+801.055889184 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.732661 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.732742 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.732720025 +0000 UTC m=+801.056106334 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.732932 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733109 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733119 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733139 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733202 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.733187416 +0000 UTC m=+801.056573455 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733202 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733277 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733316 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733328 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733348 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733369 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733394 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733435 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.733412836 +0000 UTC m=+801.056799185 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733463 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733485 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733509 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.733496306 +0000 UTC m=+801.056882285 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733549 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733596 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733640 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733674 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" 
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733730 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733738 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733749 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733896 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733917 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733806 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733966 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.733765 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734065 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734110 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733970 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.733800139 +0000 UTC m=+801.057186628 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733969 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734164 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.73414308 +0000 UTC m=+801.057529349 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734210 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734198828 +0000 UTC m=+801.057584787 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.734216 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734056 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.733929 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734257 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734288 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh 
podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734279254 +0000 UTC m=+801.057665180 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734305 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734334 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.734273 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734375 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734373 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734310 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734301911 +0000 UTC m=+801.057687870 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734441 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734431875 +0000 UTC m=+801.057817754 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734460 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734450886 +0000 UTC m=+801.057836825 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734485 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734472657 +0000 UTC m=+801.057858626 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734488 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734502 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734495447 +0000 UTC m=+801.057881226 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.734661 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734735 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.734797 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734784646 +0000 UTC m=+801.058170445 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.735099 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.734931824 +0000 UTC m=+801.058318730 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.735388 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.735366468 +0000 UTC m=+801.058752737 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.836739 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.836990 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.837143 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837240 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837299 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837359 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.837330522 +0000 UTC m=+801.160716461 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837375 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837399 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.837374224 +0000 UTC m=+801.160760643 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837477 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.837454725 +0000 UTC m=+801.160841074 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.837566 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.837692 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837707 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837739 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.837731365 +0000 UTC m=+801.161117144 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.837766 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837800 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.837991 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838085 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.838061794 +0000 UTC m=+801.161448263 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.837861 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.838303 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838360 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.838300463 +0000 UTC m=+801.161686771 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838167 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838494 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.838470797 +0000 UTC m=+801.161857126 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.838492 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.838570 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838401 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838614 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838675 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.838661303 +0000 UTC m=+801.162047532 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838707 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.838692154 +0000 UTC m=+801.162078343 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.838678 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838738 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838774 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.838765106 +0000 UTC m=+801.162150885 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.838914 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.839061 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839148 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.839124357 +0000 UTC m=+801.162510706 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839202 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839261 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.839245041 +0000 UTC m=+801.162631330 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.839207 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839316 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839348 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.839355 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839378 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839437 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.839420817 +0000 UTC m=+801.162807086 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839479 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.839514 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839530 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.83951522 +0000 UTC m=+801.162901509 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839599 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839649 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.839632934 +0000 UTC m=+801.163019203 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.839651 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839709 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.839727 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839756 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.839742927 +0000 UTC m=+801.163129156 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.839920 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.840006 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.840080 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.840055377 +0000 UTC m=+801.163442146 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.840157 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.840215 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.840198681 +0000 UTC m=+801.163584960 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.840479 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.840635 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.840691 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.840789 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.840932 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.840940 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841000 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:31:48 crc 
kubenswrapper[4125]: E0312 13:31:48.841068 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841058338 +0000 UTC m=+801.164444117 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.840954 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841092 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841092 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.840962 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841107 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841081858 +0000 UTC m=+801.164468677 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841142 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.84113504 +0000 UTC m=+801.164520789 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.841169 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.841195 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841203 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841253 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841236684 +0000 UTC m=+801.164622863 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841268 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841269 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841280 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841294 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841316 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.841222 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841318 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841311685 +0000 UTC m=+801.164697614 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841213 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841405 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841375427 +0000 UTC m=+801.164761766 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.841517 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841586 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841573565 +0000 UTC m=+801.164959482 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841617 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841603004 +0000 UTC m=+801.164988753 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.841591 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841659 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841675 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841717 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841701307 +0000 UTC m=+801.165087606 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841750 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.841735898 +0000 UTC m=+801.165122207 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.841796 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.841971 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842081 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842111 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.8420844 +0000 UTC m=+801.165470739 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842176 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842223 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842247 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842298 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842258 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842307 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.842289926 +0000 UTC m=+801.165676265 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842359 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842371 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842381 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842398 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842409 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.842401169 +0000 UTC m=+801.165786948 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842314 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842481 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842548 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842564 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. 
No retries permitted until 2026-03-12 13:33:50.842541673 +0000 UTC m=+801.165927922 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842578 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842455 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842586 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842625 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842663 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842684 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.842668178 +0000 UTC m=+801.166054487 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842740 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842598 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842796 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842928 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.842906156 +0000 UTC m=+801.166292385 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842930 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.842965 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843068 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843088 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.843063029 +0000 UTC m=+801.166449798 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.842946 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843121 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843147 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.843129762 +0000 UTC m=+801.166516001 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843159 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843182 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843243 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.843227396 +0000 UTC m=+801.166613675 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.843294 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.843480 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843629 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.843657 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843673 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843700 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843772 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.843751661 +0000 UTC m=+801.167137900 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843935 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.843946 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.843968 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844065 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.844131 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844146 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.844126635 +0000 UTC m=+801.167512864 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.844214 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844222 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.844273 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844277 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.844260559 +0000 UTC m=+801.167646878 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.844325 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844370 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844398 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844418 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844466 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844551 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844610 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844636 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844656 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844682 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844703 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered 
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844469 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.844455355 +0000 UTC m=+801.167841564 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844768 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844777 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.844760584 +0000 UTC m=+801.168146873 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844797 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844936 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.844913 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.844794435 +0000 UTC m=+801.168180614 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.844399 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845001 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.844981211 +0000 UTC m=+801.168367390 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845101 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.845082654 +0000 UTC m=+801.168468893 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845183 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.845163827 +0000 UTC m=+801.168550146 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.845308 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.845378 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.845426 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.845478 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.845576 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845594 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845776 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845705 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.845630 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845709 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845956 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.845922007 +0000 UTC m=+801.169308256 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846163 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.846139564 +0000 UTC m=+801.169525863 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846197 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.846182645 +0000 UTC m=+801.169568974 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered
Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846253 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846316 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846371 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846481 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846543 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846657 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846714 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846723 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846763 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845952 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846765 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.846753665 +0000 UTC m=+801.170139434 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846894 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846927 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.84691951 +0000 UTC m=+801.170305279 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.846945 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.846950 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.846942661 +0000 UTC m=+801.170328410 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.845763 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847195 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847186578 +0000 UTC m=+801.170572347 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847227 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847299 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847350 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847620 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847301 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.84728124 +0000 UTC m=+801.170667659 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847653 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847646532 +0000 UTC m=+801.171032281 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847667 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847661413 +0000 UTC m=+801.171047172 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847682 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847676603 +0000 UTC m=+801.171062442 (durationBeforeRetry 2m2s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847696 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847690174 +0000 UTC m=+801.171075933 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847725 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847751 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847743105 +0000 UTC m=+801.171128874 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847780 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847801 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847795727 +0000 UTC m=+801.171181586 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.847933 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.847922561 +0000 UTC m=+801.171308320 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.886596 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.886749 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.948975 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949262 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949303 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 
13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949326 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949401 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.949380263 +0000 UTC m=+801.272766502 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.949402 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.949495 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949527 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949563 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949649 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.949715 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.949722 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:33:50.949700062 +0000 UTC m=+801.273086301 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.949799 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.949984 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950142 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950229 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.950205888 +0000 UTC m=+801.273592227 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950314 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950358 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950414 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950440 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950467 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950531 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.950501237 +0000 UTC m=+801.273887576 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950371 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950567 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950569 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.950552209 +0000 UTC m=+801.273938398 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.950759 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.950736395 +0000 UTC m=+801.274122584 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.950897 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.951141 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951256 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951402 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951441 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951464 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951500 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.951478577 +0000 UTC m=+801.274864906 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.951262 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951711 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.951687414 +0000 UTC m=+801.275073663 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.952098 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.951360 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952210 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952231 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952278 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952296 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.952277513 +0000 UTC m=+801.275663752 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952305 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952324 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952393 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.952375504 +0000 UTC m=+801.275761853 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952480 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.952534 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.95251949 +0000 UTC m=+801.275906049 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.952174 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953147 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953242 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953300 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953375 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953402 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953414 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953433 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953498 4125 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.953479521 +0000 UTC m=+801.276865750 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953580 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953578 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953608 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953628 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953642 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953647 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953661 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953686 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953716 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.953694156 +0000 UTC m=+801.277080515 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953759 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.953742877 +0000 UTC m=+801.277129186 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.953911 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953946 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953979 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953998 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.954154 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954160 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954133459 +0000 UTC m=+801.277519808 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954265 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954301 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954318 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954349 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954371 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954356896 +0000 UTC m=+801.277743125 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.954271 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954411 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954394667 +0000 UTC m=+801.277780946 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.953761 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.954456 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954471 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954457639 +0000 UTC m=+801.277844058 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.954520 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954530 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954583 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954565552 +0000 UTC m=+801.277951891 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954628 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.954651 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954678 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954664095 +0000 UTC m=+801.278050444 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.954724 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954746 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954769 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954786 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954953 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.954930114 +0000 UTC m=+801.278316453 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.954989 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.955063 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955087 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955114 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.955095029 +0000 UTC m=+801.278481378 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.955170 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955173 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.95515127 +0000 UTC m=+801.278537499 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955200 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955231 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955248 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.955287 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955303 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.955287684 +0000 UTC m=+801.278674033 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955385 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955443 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.955428629 +0000 UTC m=+801.278814958 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955467 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955505 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955523 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955579 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.955562344 +0000 UTC m=+801.278948593 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955634 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955653 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.955680 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955704 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.955690288 +0000 UTC m=+801.279076497 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955765 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.955921 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.955802711 +0000 UTC m=+801.279189030 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.956366 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:31:48 crc kubenswrapper[4125]: I0312 13:31:48.957527 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.957727 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.957767 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.957786 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.957802 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.958340 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed.
No retries permitted until 2026-03-12 13:33:50.95831547 +0000 UTC m=+801.281701839 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:48 crc kubenswrapper[4125]: E0312 13:31:48.958394 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:33:50.958375132 +0000 UTC m=+801.281761631 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.024802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.025528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025642 4125 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.025925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.026293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.026645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.026897 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.027243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.027425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.027587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.027642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.027664 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.027904 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.028140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.028712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.028329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.029067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.028450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.028497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.028541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.028576 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.028618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.029287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.029660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.029720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.029741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.030109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.030262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.030464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.030541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.030659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.031392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.031422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.031509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.031661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.032997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.033356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.033997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.034195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.034240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.034457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.034660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.061799 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.062191 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.062334 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.062391 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.062414 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.062516 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.062565 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.062585 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.063160 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:33:51.063121457 +0000 UTC m=+801.386507726 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.063293 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:33:51.063271181 +0000 UTC m=+801.386657360 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.064225 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.064408 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.064582 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.064613 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.064742 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:33:51.064709114 +0000 UTC m=+801.388095453 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 12 13:31:49 crc kubenswrapper[4125]: E0312 13:31:49.131687 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.887738 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:49 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:49 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:49 crc kubenswrapper[4125]: I0312 13:31:49.888143 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.025264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.025499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.026134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.026459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.026567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.026764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.027440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.027670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.027963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.027977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.028171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.028233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.028266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.028401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.028589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.028651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.028756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.028997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.029395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.029706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.030263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.030614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.030938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.031350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.031663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:31:50 crc kubenswrapper[4125]: E0312 13:31:50.032234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.886888 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:31:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:31:50 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:31:50 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:31:50 crc kubenswrapper[4125]: I0312 13:31:50.887060 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.025687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.026304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026546 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.026522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.027119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.027352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.027715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.027958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.028249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.028454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.028726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.028746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.028786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.029217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.029344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.029446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.029680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.030284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.030571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.030710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.031271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.031495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.031653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.032175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.032410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.032725 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.033223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.033506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.033676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.033922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.034219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.034375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.034483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.034518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.035257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.035731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.036419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.036639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.036753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.037422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.037431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.037714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.037756 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.038198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.038440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.038602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.038660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.039087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.039262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.039442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.039796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.040621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:51 crc kubenswrapper[4125]: E0312 13:31:51.040961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.886457 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:51 crc kubenswrapper[4125]: I0312 13:31:51.886548 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.025181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.025606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.025727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.026087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.026188 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.026244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.026310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.025217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.026460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.026462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.026629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.026769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.026945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.026964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.027205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.027316 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.027340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.027444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.027466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.027596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.027689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.027749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.027983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.028187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.028341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.028422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.028490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.028555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.029650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.030142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.030276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.032491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.060638 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.097475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.147774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.203405 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.230779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.247545 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/6.log"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.250628 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"53c1db1508241fbac1bedf9130341ffe","Type":"ContainerStarted","Data":"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf"}
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.252660 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.255796 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/3.log"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.256588 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/2.log"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.256755 4125 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45" exitCode=1
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.256793 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45"}
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.256890 4125 scope.go:117] "RemoveContainer" containerID="4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0"
Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.257487 4125 scope.go:117] "RemoveContainer" containerID="bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45"
Mar 12 13:31:52 crc kubenswrapper[4125]: E0312 13:31:52.257990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.304088 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.325166 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.341087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.359694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.379116 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.397888 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.418249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.449148 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.471276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.490368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.521228 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.547516 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.578734 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.598641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.615224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.637056 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.653754 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.669135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.690196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.708600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.731960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.756151 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.774966 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.792594 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.818505 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.839299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.867113 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.886346 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.886435 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.890101 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.965985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:52 crc kubenswrapper[4125]: I0312 13:31:52.988158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.015423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.026467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026496 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026539 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.026732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026804 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.026992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.026998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.027202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.027341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.027541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.027753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.027913 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.028049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.028107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.028146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.028259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.028506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.028722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.028802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029096 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.029149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.029326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.029935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.029989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.030185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.030319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.030471 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.030603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.030953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031115 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:53 crc kubenswrapper[4125]: E0312 13:31:53.031670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.045481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.075937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.096089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.120960 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.153460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 5m0s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(53c1db1508241fbac1bedf9130341ffe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.176720 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.213143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.231761 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.255603 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.266194 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/3.log" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.281679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.306261 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.326118 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.359485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.393725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.418502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.436259 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.459591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.479196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.518306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.540456 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.561798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.587324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.602534 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.620167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.641903 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.658184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.681256 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.700246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.725754 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.762577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.793172 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.817199 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.850343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.866892 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.886140 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:53 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:53 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.886325 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.905094 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.929762 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.951517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.975995 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:53 crc kubenswrapper[4125]: I0312 13:31:53.996186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.012428 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.025182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.025225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.025384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.025624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.025702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.025625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.025799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.025884 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.025903 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.026766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.026990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.027093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.027214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.027284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.027391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.030725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.047627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.064485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.082119 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.098376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.113757 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.131304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: E0312 13:31:54.133267 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.147454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.163467 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.177457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.196146 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.218039 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.234327 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.250605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.267480 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.283381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.302415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.320438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.337078 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.351125 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.368644 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.382528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.397049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.418728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.433989 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.451003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.469815 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.487510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.505223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.522198 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.543504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
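
Each quoted payload is a strategic merge patch against the pod's status subresource. The $setElementOrder/conditions directive pins the order of the conditions list, which is otherwise merged entry-by-entry on its "type" key rather than replaced wholesale. A sketch of how such a patch merges, using the strategicpatch package from k8s.io/apimachinery; the JSON below is a simplified stand-in, not copied from the log:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/strategicpatch"
    )

    func main() {
        original := []byte(`{"status":{"conditions":[` +
            `{"type":"Ready","status":"True"},` +
            `{"type":"PodScheduled","status":"True"}]}}`)
        // The directive lists only the merge keys; entries in "conditions"
        // are matched by "type" and merged field-by-field.
        patch := []byte(`{"status":{` +
            `"$setElementOrder/conditions":[{"type":"PodScheduled"},{"type":"Ready"}],` +
            `"conditions":[{"type":"Ready","status":"False"}]}}`)
        merged, err := strategicpatch.StrategicMergePatch(original, patch, corev1.Pod{})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(merged))
    }
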
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.568078 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
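
Either address quoted in these records (the webhook at 127.0.0.1:9743, or api-int.crc.testing:6443 in the cluster-policy-controller crash output that continues below) can be checked by probing the listener and reading the leaf certificate it presents. A small diagnostic sketch; InsecureSkipVerify is deliberate here, since verification is exactly what fails:

    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        // Address taken from the log; any TLS listener can be probed this way.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // fetch the cert even though it is expired
        })
        if err != nil {
            panic(err)
        }
        defer conn.Close()
        leaf := conn.ConnectionState().PeerCertificates[0]
        fmt.Printf("subject:   %s\n", leaf.Subject)
        fmt.Printf("notBefore: %s\n", leaf.NotBefore.Format(time.RFC3339))
        fmt.Printf("notAfter:  %s\n", leaf.NotAfter.Format(time.RFC3339))
        if time.Now().After(leaf.NotAfter) {
            fmt.Println("=> leaf certificate is expired")
        }
    }
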
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.591103 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.611924 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.632128 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.673719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.741993 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.763150 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.781287 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.796058 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.824197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.842289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.859348 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.887170 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.887243 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.896587 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.937372 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:54 crc kubenswrapper[4125]: I0312 13:31:54.979472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.017112 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025074 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025119 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.025262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024913 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.024937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.025306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.025488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.025652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025738 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.025769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.026082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.026143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.026259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.026340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.026390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.026482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.026627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.026649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.027119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.027306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.027401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.027528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.027717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.027940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.028070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.028277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.028334 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.028520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.028737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.028784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.028979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.028990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029084 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.029341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.029918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.030069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.030169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.030305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.030348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.030453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:55 crc kubenswrapper[4125]: E0312 13:31:55.030537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.058493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.100071 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.135156 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.179570 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.221400 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.255423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.301204 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.343927 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.376304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.427280 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:31:55Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.889284 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:55 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:55 crc kubenswrapper[4125]: I0312 13:31:55.889429 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.025771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.027352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.027984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026323 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.028534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.028652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026448 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.029117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026459 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.029344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.029538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.026715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.029653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.029785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.030223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.030421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.030592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.030764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.031144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.031315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:56 crc kubenswrapper[4125]: E0312 13:31:56.031444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.889738 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:56 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:56 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:56 crc kubenswrapper[4125]: I0312 13:31:56.890102 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.025555 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.026092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.026433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.026689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.027183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.027441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.027752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.028169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.028412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.028641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.028679 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.028957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.029257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.029591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.029771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.030118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.030220 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.030421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.030420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.030529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.030754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.030993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.031198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.031253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.031349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.031360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.031258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.031511 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.031575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.031606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.033512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.033634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.033782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.034136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.034469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.034659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.034777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.035310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.035179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.035578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.036210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.036583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.036937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.037568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.038274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.038570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.038696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.039115 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.039205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.039498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.039699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.040108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.040276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.040799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.041250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.041317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.041531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:57 crc kubenswrapper[4125]: E0312 13:31:57.041723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.889115 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:57 crc kubenswrapper[4125]: I0312 13:31:57.889295 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025428 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.025710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025712 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.025805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.026322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.026540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.026702 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.026959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.027080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.027168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.027195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.027384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.027473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.027701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.027994 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.028001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.028182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.028282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.028448 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.028707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.028796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.029132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.587226 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.587323 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.587357 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.587392 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.587428 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:58Z","lastTransitionTime":"2026-03-12T13:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.617556 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.627714 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.627914 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.627953 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.627991 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.628091 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:58Z","lastTransitionTime":"2026-03-12T13:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.660275 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.667966 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.668123 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.668161 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.668199 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.668246 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:58Z","lastTransitionTime":"2026-03-12T13:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.699569 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:58Z is after 2024-12-26T00:46:02Z"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.709644 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.709697 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.709722 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.709757 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.709791 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:58Z","lastTransitionTime":"2026-03-12T13:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.739581 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.747405 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.747523 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.747565 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.747609 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.747656 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:31:58Z","lastTransitionTime":"2026-03-12T13:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.775980 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:31:58Z is after 2024-12-26T00:46:02Z" Mar 12 13:31:58 crc kubenswrapper[4125]: E0312 13:31:58.776130 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.888459 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:58 crc kubenswrapper[4125]: I0312 13:31:58.888641 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.025763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.025789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026074 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026244 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.026536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.025789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.029304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.029322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.029598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.029620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.029730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.029762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.030083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.030256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.030378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.030536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.030707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.031057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.031223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.031363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.031551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.031706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.031966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.032262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.032422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.032590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.032643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.032791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.033161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.033216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.033356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.033492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.033641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.033793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.034111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.034257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.034392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.034503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.034970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.035592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.035791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.036184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.036359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.036484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:31:59 crc kubenswrapper[4125]: E0312 13:31:59.137239 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.887790 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:31:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:31:59 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:31:59 crc kubenswrapper[4125]: healthz check failed Mar 12 13:31:59 crc kubenswrapper[4125]: I0312 13:31:59.888102 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.025082 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.025317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.025561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.025675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.025951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.026161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.024961 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026321 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.026451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.026754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.026933 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.027052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.027069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.027193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.027280 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.027397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.027449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.027735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.027794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.028240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.028338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.028527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.028725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.029625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.030067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.030289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:00 crc kubenswrapper[4125]: E0312 13:32:00.030447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.887958 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:00 crc kubenswrapper[4125]: I0312 13:32:00.888150 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.025166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.025235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.025167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.025428 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.025693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.025918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.025997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.026396 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.026765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.026990 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.027306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.027371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.027554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.027622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.027656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.027732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.027750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.027678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.028192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.028283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.028606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.028693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.028920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.029097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.029269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.029329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.029379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.029411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.029535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.029550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.029712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.029745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.030082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.030195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.030343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.030342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.030659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.031124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.031241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.031605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.032255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.032971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.033304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.033332 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.033536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.033607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.033679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.033798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.033929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.033935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.034006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.034155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.034306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.034459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.034749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.035546 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.035693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:01 crc kubenswrapper[4125]: E0312 13:32:01.035960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.892739 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:01 crc kubenswrapper[4125]: I0312 13:32:01.893087 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.025152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.025439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.025518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.025543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.025644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.026165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.026179 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.026393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.026524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.026774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.026786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.027170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.027249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.025068 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.029377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.030437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.031517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.032705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.033326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.033725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.036254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.037102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.037733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.037745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.038195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.038200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.038433 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.048204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.052575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.053181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.053427 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:02 crc kubenswrapper[4125]: E0312 13:32:02.054433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.065597 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.104391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.139111 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.165368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.193495 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.219178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.258494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.289739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.322003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.342369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.379391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T1
3:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.412382 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.438411 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.461957 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.487385 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.520693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.546255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.579626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.609454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.631940 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.660453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.691127 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.740254 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.778741 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.813638 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.859774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.885235 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.887276 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.887367 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.914059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.936750 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.961326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:02 crc kubenswrapper[4125]: I0312 13:32:02.984946 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.009791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.051371 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.051599 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.052065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.051992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.052237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.052384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.052504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.052632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.052652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.052758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.052770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053129 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053207 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.053322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.053536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.054067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.054108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.054160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.053154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.054374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.054440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.054550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.054658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.054742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.055068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.055144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.055249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.055314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.055372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.055384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.055460 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.055589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.055701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.056386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.056721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.056991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.057224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.057417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.057546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.057554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.057588 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.057964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.058224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.058403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.058616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.058678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.058780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.059167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.059411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.059531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.059617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.059749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.060105 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.060260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.060409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.060531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.060619 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.060725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.060981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.061468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:03 crc kubenswrapper[4125]: E0312 13:32:03.061741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.075536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.100674 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.137488 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.156494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.174975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.205913 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.225158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.246159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.270143 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.299280 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.328527 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.354430 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.376684 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.407382 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.434753 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.467578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.500640 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.520917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.551157 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.577072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.596299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.614328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.643224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.666973 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.690702 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4153687131b9105a54b9325fcf0c65f4e7b1c108982744a42dbaf93067e96bc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:36Z\\\",\\\"message\\\":\\\"2026-03-12T13:29:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625\\\\n2026-03-12T13:29:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_813826f8-b82f-4b38-8ecf-54c24e913625 to /host/opt/cni/bin/\\\\n2026-03-12T13:29:50Z [verbose] multus-daemon started\\\\n2026-03-12T13:29:50Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:30:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.715207 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.737645 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.773004 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.807400 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.840567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.864673 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.888667 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.888929 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.898137 4125 status_manager.go:877] "Failed 
to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.930468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:03 crc kubenswrapper[4125]: I0312 13:32:03.962320 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.025243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.025515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.025629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.025762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.025786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.026144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.026232 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.026268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.026437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.026465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.026675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.027139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.027208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.027220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.027302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.027430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.027541 4125 scope.go:117] "RemoveContainer" containerID="bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.027777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.028078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.028146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.028182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.028342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.028598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.028651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.028979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.029283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.029530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.029780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.029975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.030180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.030736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.031322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.031498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.031676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.064235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.094408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.128647 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: E0312 13:32:04.139981 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.169104 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.204210 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.216490 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.252090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.286258 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.330148 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.359063 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.390163 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.425305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.462418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.493425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.531443 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.584502 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.623481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.661552 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.697250 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.765932 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.796322 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.822308 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.864202 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.882971 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.886087 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.886158 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.898862 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.914891 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.935845 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.955377 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.979161 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:04 crc kubenswrapper[4125]: I0312 13:32:04.996332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:04Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.025625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026120 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.026271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.026669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.026776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026863 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.026974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.027048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.027123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.027407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.026531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.027886 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.028640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.028702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028742 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.029337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.028798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028880 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028911 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028927 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.028946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.029004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.029050 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.029052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.029099 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.029123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.029142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.029162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.029263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.030651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.030742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.030890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.031194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031898 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.031782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032775 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.032986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.033086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.033332 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:05 crc kubenswrapper[4125]: E0312 13:32:05.033604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.047235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\
",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c61
30efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.074247 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.108226 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.132400 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.147123 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.165420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.181301 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.198980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.212309 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.227870 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.242785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.261601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.277332 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.296391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.312326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.327537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.342869 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.358233 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.376303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.392235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.419487 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.446791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.477477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.503678 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.536549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.573147 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.604613 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.633577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.665442 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.691958 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.725660 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.755166 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.776614 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.802995 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.838457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.861737 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.887075 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:05 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:05 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.887299 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.888447 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.921642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.947690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.972231 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:05 crc kubenswrapper[4125]: I0312 13:32:05.993310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:05Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.015390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.025605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.026132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.026504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.026743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.027202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.027439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.027731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.028147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.028517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.028763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.030186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.030483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.030693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.030887 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.031069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.031219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.031236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.031417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.032426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.032723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.033386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.033924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.034269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.035076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.036590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:06 crc kubenswrapper[4125]: E0312 13:32:06.037420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.050076 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.072947 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.089557 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.106373 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.122503 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.144235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.165600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.203962 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.224037 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.251741 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.275049 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.299298 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.329180 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.351079 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.376377 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.406293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.444068 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.470293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.509312 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.542422 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.565001 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.593955 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.621473 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.647245 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.674693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.702006 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.728774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.764975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.790476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.822512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.865512 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25
ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.886983 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.887756 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.904386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.940304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.970716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:06 crc kubenswrapper[4125]: I0312 13:32:06.999401 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:06Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.025739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.025905 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.025939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.025986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.026321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.026594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.026652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.026715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.027126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.027169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.027508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027551 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027570 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.027977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.028100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.028395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.028522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.028554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.028646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.028655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.028744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.028791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.028932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.029256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.029378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.029418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.029447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.029507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.029662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.029903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.027782 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.030409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.030511 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.030087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.030215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.030223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.030716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.031092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.031239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.031420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.031787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.031917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.031996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.032122 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.032216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.032664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.032792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.033185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:07 crc kubenswrapper[4125]: E0312 13:32:07.033460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.033469 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.056096 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.103053 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"
containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 
13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.130766 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.165678 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.208265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.245411 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.292300 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.332064 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.389591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.425162 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.465152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.488407 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.520982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.565340 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.607339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.645227 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.696258 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.726508 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.763047 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc 
kubenswrapper[4125]: I0312 13:32:07.802062 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
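
The payloads being rejected above are strategic merge patches against Pod.status: the `$setElementOrder/conditions` directive records the desired ordering of the `conditions` list, which is merged by its `type` key rather than replaced wholesale. A sketch of how such a patch applies, assuming the k8s.io/apimachinery and k8s.io/api modules (this mirrors, not reproduces, the kubelet's status_manager):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/strategicpatch"
    )

    func main() {
        // A trimmed stand-in for the status the API server last stored.
        original := []byte(`{"status":{"conditions":[{"type":"Ready","status":"True"}]}}`)

        // A trimmed patch in the same shape as the kubelet's: the directive pins
        // element order for "conditions", whose strategic merge key is "type".
        patch := []byte(`{"status":{"$setElementOrder/conditions":[{"type":"Ready"}],` +
            `"conditions":[{"type":"Ready","status":"False","reason":"ContainersNotReady"}]}}`)

        // corev1.Pod supplies the patchStrategy/patchMergeKey struct tags that
        // tell the merger how to combine the lists.
        merged, err := strategicpatch.StrategicMergePatch(original, patch, corev1.Pod{})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(merged))
    }

Note that these PATCH requests never reach that merge step on the server: the admission webhook rejects them first, and the kubelet keeps retrying, which is why the same pods reappear throughout this log.
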
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.840108 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.881950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.887286 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.887383 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.922284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:07 crc kubenswrapper[4125]: I0312 13:32:07.960165 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:07Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.011524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.024722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.024935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.025165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.025330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.025436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025486 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025867 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.025970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.026112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.025976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.026330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.026387 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.026428 4125 util.go:30] "No sandbox for pod can be found. 
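
The burst of pod_workers errors above all carry the same root condition: the container runtime reports NetworkReady=false because no CNI configuration file exists under /etc/kubernetes/cni/net.d/ (the network plugin that would write it is itself stuck behind the expired webhook). The readiness test essentially amounts to checking that directory for a network config; a minimal sketch of that check, using the path from the log:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // path from the log messages
        entries, err := os.ReadDir(confDir)
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        found := false
        for _, e := range entries {
            // CNI accepts .conf, .conflist and (legacy) .json network configs.
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                fmt.Println("CNI config present:", e.Name())
                found = true
            }
        }
        if !found {
            // The state this log reports: NetworkReady=false, sandboxes cannot start.
            fmt.Println("No CNI configuration file in", confDir)
        }
    }

Until a config appears there, every "No sandbox for pod can be found" entry is followed by the same "Error syncing pod, skipping", since new sandboxes need a ready network plugin.
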
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.026424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.026640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.026758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.027170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.027359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.027516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.027586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.027691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.027694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.028400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:08 crc kubenswrapper[4125]: E0312 13:32:08.029092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.042970 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.123774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:08Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.888803 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:08 crc kubenswrapper[4125]: I0312 13:32:08.889126 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.026205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.026311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.026566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.026574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.026266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.026205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.027099 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.027110 4125 util.go:30] "No sandbox for pod can be found. 
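
The router-default startup probe entries show the kubelet's HTTP prober semantics: a response code outside 200-399 fails the probe, and patch_prober logs the start of the body, here haproxy's healthz breakdown ([-]backend-http and [-]has-synced failing while [+]process-running is ok). An equivalent check, sketched with a hypothetical healthz URL (the real host and port come from the pod's probe spec, which this log does not show):

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
    )

    func main() {
        // Hypothetical healthz URL; the real target is defined in the probe spec.
        resp, err := http.Get("http://127.0.0.1:1936/healthz/ready")
        if err != nil {
            fmt.Println("probe error:", err)
            return
        }
        defer resp.Body.Close()

        // The kubelet treats any status in [200, 400) as probe success.
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            fmt.Println("probe success:", resp.Status)
            return
        }
        fmt.Printf("HTTP probe failed with statuscode: %d\n", resp.StatusCode)

        // Like patch_prober, show the start of the body ([-]/[+] check lines).
        sc := bufio.NewScanner(resp.Body)
        for i := 0; i < 10 && sc.Scan(); i++ {
            fmt.Println(sc.Text())
        }
    }
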
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.027263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.027287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.027358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.027642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.027749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.027978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.028234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.028556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.028737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.028934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.028967 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.028990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.029122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.029173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.029262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.029324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.029589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.029698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.029722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.029995 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.030242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.030355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.030494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.030503 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.030499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.030637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.030798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.031303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.031355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.031421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.031560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.031618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.031916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.033156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.033381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.031933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.031963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.032127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.032183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.032493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.032522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.032559 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.033736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.032568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.032770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.033995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.034303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.034400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.034502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.034583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.034679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.034948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.035227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.035403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.035530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.037451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.083914 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.084282 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.084305 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.084331 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.084360 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:09Z","lastTransitionTime":"2026-03-12T13:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.108978 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.142245 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.259327 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.259734 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.259763 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.259882 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.260025 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:09Z","lastTransitionTime":"2026-03-12T13:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.286615 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.292449 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.292510 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.292526 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.292547 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.292577 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:09Z","lastTransitionTime":"2026-03-12T13:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.311324 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:09Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:09 crc kubenswrapper[4125]: E0312 13:32:09.311405 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.889140 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:32:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:32:09 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:32:09 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:32:09 crc kubenswrapper[4125]: I0312 13:32:09.889279 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.025308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.025440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.025629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.025672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.025742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.025966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.025294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.026171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.026221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.026325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.026458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.026537 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.026608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.026719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.026758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.027089 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.027145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.027290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.027327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.027491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.027652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.027704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.027791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.027976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.028256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.028340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.028460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.028560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.028653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.029270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.029591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:10 crc kubenswrapper[4125]: E0312 13:32:10.029764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.246937 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.351545 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/4.log"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.356690 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7"}
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.357666 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.379219 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.400749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.431853 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.467990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.485211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.509349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.532141 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.551623 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.570503 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.602998 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.627985 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.661454 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.680553 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.701541 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.719095 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.732567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.747278 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.769923 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.789626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.806922 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.823325 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.838283 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.854725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.874975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.893269 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.893571 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.906617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.925467 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.942881 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.956185 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.972386 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:10 crc kubenswrapper[4125]: I0312 13:32:10.986752 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:10Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.011046 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.024994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025336 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025082 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.025703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025932 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.025723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.026136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.026158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.026127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.026335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.026364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.026409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.026524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.026571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.026626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.026646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.026728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027063 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.027234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.027241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.027303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.027514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027543 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.027740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.027977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.028254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.028966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029096 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.029710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.030941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.031228 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.031382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.031518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.031677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.031993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.032159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.032765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.060276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.083368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.101220 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.123044 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.142617 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.161785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.181372 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.206949 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.226739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.249665 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.275523 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.297611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.320230 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.344500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c4
7358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.366375 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/5.log" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.368244 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/4.log" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.375258 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.380473 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7"} Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.380568 4125 scope.go:117] "RemoveContainer" containerID="292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.382662 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.383127 4125 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" exitCode=1 Mar 12 13:32:11 crc kubenswrapper[4125]: E0312 13:32:11.383490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" 
podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.399428 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.416736 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.440983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.458775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.480479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.499253 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.520662 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.551511 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.582194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.606748 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.641054 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.662152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.678867 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.706885 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.725571 4125 status_manager.go:877] "Failed to update status for pod"
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.743610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.772158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.793612 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.809634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.827397 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.844605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"
ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.869496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.885858 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.885979 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.890106 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.911043 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.933145 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.950324 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.970651 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:11 crc kubenswrapper[4125]: I0312 13:32:11.992348 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.016919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.024967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025053 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.026194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.026782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.026939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.027126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025419 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.027340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.027415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.025510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.026403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.027525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.027690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.027944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.028171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.028340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.028598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.029000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.029194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.030227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.055708 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.080966 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.106103 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.140599 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.165520 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.192937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.228536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d46
93f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.259955 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.287387 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.311222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.344158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.374527 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.393241 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/5.log" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.405953 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:32:12 crc kubenswrapper[4125]: E0312 13:32:12.406750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.422937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.452336 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.477354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.509954 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.554544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://292e4b4b3787d5168485268d70f825cc0898058cd7286894df26280f0b158360\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:30:37Z\\\",\\\"message\\\":\\\"12 13:30:37.836443 18455 handler.go:203] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0312 13:30:37.836450 18455 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:30:37.836451 18455 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:30:37.836457 18455 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:30:37.836476 18455 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:30:37.836487 18455 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:30:37.836461 18455 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:30:37.836497 18455 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:30:37.836356 18455 reflector.go:295] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0312 13:30:37.836681 18455 reflector.go:295] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:159\\\\nI0312 13:30:37.836774 18455 network_attach_def_controller.go:166] Shutting down 
network-controller-manager NAD controller\\\\nI0312 13:30:37.836886 18455 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.584291 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.610365 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.635039 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented 
the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.675597 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.698171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.717790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.738584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.760568 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.789789 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.814176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.833189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.853926 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.885736 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.885929 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.889389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.911657 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.937153 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.964758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:12 crc kubenswrapper[4125]: I0312 13:32:12.983102 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.009135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.027004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.028968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.028986 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.029378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.030155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.030312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.030450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.030472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.030547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.031995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.032004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.032158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.032299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.032572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.033408 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.033617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.034357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.034516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.034585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.034711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.035124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.035131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.035314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.035459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.035929 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.035963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.036984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.035722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.037089 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.037172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.037252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.037337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.037341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.037748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.038106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.038248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.038447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.038611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.038789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.039915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.041204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.041683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.041876 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.042078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.042359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.042689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.042935 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.043222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.043430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.043536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.043707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.044214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.044281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.044544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.044667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.044941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.045113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.045249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.046054 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.046341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.047365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.048143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.048699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:13 crc kubenswrapper[4125]: E0312 13:32:13.048954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.050135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.080402 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.113792 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.138136 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.160744 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.188056 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.209362 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.229915 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.259080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.286363 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.311125 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.339757 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.363339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.394196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.427392 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.455418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.505140 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.542409 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.581407 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.629080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.669722 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.702996 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.736112 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.783633 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.823270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.861905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.887799 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:32:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:32:13 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:32:13 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.888089 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.901962 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.942412 4125 
status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:13 crc kubenswrapper[4125]: I0312 13:32:13.979255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.020416 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.025989 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.026305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.026576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.026786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.027230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.027481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.027758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.028120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.028404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.028636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.029072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.029313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.029552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.029758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.030172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.030391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.030668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.030973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.031725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.031308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.032301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.032803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.033353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.033529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.033663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.033795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.072668 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.101991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: E0312 13:32:14.144354 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.146123 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.173224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.220224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.263449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.303327 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.341072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.380529 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.427716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.459162 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.498749 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.539473 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.580419 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.627055 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.660696 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.699579 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.751716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.819479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.852194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.879920 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.888747 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.888976 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.912082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d35
40ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.932527 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:14 crc kubenswrapper[4125]: I0312 13:32:14.972351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:14Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.014917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.025636 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.026031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.026734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.027247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.027407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.027504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.027544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.027884 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028038 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.028162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.028308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.028480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.028639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.028952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.029114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.029241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029331 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.029623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.029664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.029729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.029896 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.030312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.030542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.026750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.027185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.031628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.031997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.032251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.032543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.032608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.033096 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.033392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.033499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.033593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.034092 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.034404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.032135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.027894 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.032369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.032482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.032920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033865 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.033928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:15 crc kubenswrapper[4125]: E0312 13:32:15.037476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.053233 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z 
is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.097051 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.137379 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.175564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.215572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.264570 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.303584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.345468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.399982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed 
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.430408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.463211 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.515310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.543103 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.587723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.623610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.656552 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.691693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.735301 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.778492 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.823293 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.863738 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.887758 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:32:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:32:15 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:32:15 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.888113 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.905275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.933630 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:15 crc kubenswrapper[4125]: I0312 13:32:15.992747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:15Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.027346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.027677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.027902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.028168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.028321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.028495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.028594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.028785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.028970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.029222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.029404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.029922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.029995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.030175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.030186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.030277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.030504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.030727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.030795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.031435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.031459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.032142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.032244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.032334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.032419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.032573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.032699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.033095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.033277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.033434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.033696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.034624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: E0312 13:32:16.034667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.055552 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.093464 4125 status_manager.go:877] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.143094 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.180675 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.223887 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.253125 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.294239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.331214 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.375261 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.416610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.455544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.498499 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.537539 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.572523 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.621790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.656428 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.693584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.736221 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25
ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.777317 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.821363 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.863690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.886891 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.887093 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.896765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start 
--config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\
\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.938391 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:16 crc kubenswrapper[4125]: I0312 13:32:16.980369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:16Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.025652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.025709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026081 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.026120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.026284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.026479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.026680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.026913 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.027001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.027185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.027312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.027387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.027481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.027524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.027591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.027664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.027751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.027903 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.028058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028099 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.028251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.028431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.028585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.028643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.028742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.028971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.029187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.029340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029390 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.029457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.029568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.029787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.030039 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.030173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.030294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.030355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.030502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.030601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.030648 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.030766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.030907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.031113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.031460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.031602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.031702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.031795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.032160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.032253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:17 crc kubenswrapper[4125]: E0312 13:32:17.032329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.040181 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.067290 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.104468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.134720 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.179381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.217097 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.275942 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.319789 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.351536 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.369982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.413213 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.451598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.493520 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.539453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.571255 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.614264 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.655229 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.696445 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc 
kubenswrapper[4125]: I0312 13:32:17.753120 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.778637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.826173 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.859215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.886722 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:17 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:17 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.887076 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.902470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.936270 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:17 crc kubenswrapper[4125]: I0312 13:32:17.988077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:17Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026508 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.026545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.026743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026922 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.027139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.027202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.027271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.027315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.027285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.027445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.026210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.027506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.027661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.027674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.027971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.028237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.028629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.029127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.029160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.029291 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.029307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.029410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.029553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.029732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.030143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.030310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.030477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.030584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: E0312 13:32:18.030733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.068785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.101220 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.157130 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.191050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.225448 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.264334 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.304891 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.345189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.382406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.432348 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.480050 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.504577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.535571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.587326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.620359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.658208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.695547 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.735775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.779578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.819773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.856906 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.886668 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:18 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:18 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.886793 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.902178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.940213 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:18 crc kubenswrapper[4125]: I0312 13:32:18.986167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:18Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.021091 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.025603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.025660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.025668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.025890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.025951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.025612 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.026245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.026391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.026577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026728 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.026741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026784 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.026923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.026980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.027124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.027315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.027585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027878 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.027912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.028149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.028315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.028486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.028656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.028743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.028794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.029253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.029351 4125 scope.go:117] "RemoveContainer" containerID="bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.029525 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.029687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.029968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.030072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.030213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.030330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.030386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.030494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.030533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.030598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.030716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.030998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.031242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.031390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.031605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.031697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.031919 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.032167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.032307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.032437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.032574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.032704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.032958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.033236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.033378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.033506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.033628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.064600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.099459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.146316 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.612678 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.613535 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.613623 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.613669 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.613710 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:19Z","lastTransitionTime":"2026-03-12T13:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.647795 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.659094 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.659279 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.659333 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.659384 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.659608 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:19Z","lastTransitionTime":"2026-03-12T13:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.689414 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.698717 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.699001 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.699197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.699730 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.700302 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:19Z","lastTransitionTime":"2026-03-12T13:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.731573 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.738978 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.739177 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.739214 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.739253 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.739304 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:19Z","lastTransitionTime":"2026-03-12T13:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.768496 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.777179 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.777284 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.777315 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.777355 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.777390 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:19Z","lastTransitionTime":"2026-03-12T13:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.811481 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:19 crc kubenswrapper[4125]: E0312 13:32:19.811626 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.887120 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:19 crc kubenswrapper[4125]: I0312 13:32:19.887263 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.026698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.026974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.027113 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.027287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.027287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.027436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.027450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.027944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.028194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.028371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.028454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.028614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.028652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.029101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.029125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.029488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.029565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.029725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.029805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.030122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.030293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.030631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.031196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.031417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:20 crc kubenswrapper[4125]: E0312 13:32:20.031691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.889305 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:20 crc kubenswrapper[4125]: I0312 13:32:20.889472 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.025613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.025718 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.026269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.025638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.026659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.026697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.027094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.027133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.027231 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.027331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.027331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.027391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.027471 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.027589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.027668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.027997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.028279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.028393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.028554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.028587 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.028805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.029074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.029156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.029252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.029459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.029515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.029622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.030089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.030605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.031082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.031307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.031435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.031366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.031446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.031617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.031625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.031807 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.032269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.032287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.032329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.032367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.032378 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.032126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.032615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.032793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.034449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.034944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.034995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.035257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.035413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.035577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.035695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.036259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.036410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.036600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.036754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.037000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:21 crc kubenswrapper[4125]: E0312 13:32:21.037214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.886984 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:21 crc kubenswrapper[4125]: I0312 13:32:21.887279 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.026567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.026666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.026716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.026980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027367 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027376 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.027610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.027713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.027983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.028059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.028113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.028166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.028210 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.028211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.028438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.028552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.028669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.028798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.029918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:22 crc kubenswrapper[4125]: E0312 13:32:22.030123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.046759 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.070669 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.094214 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.119479 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.140382 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.155165 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.174350 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.196482 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25
ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.219671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.237882 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.257577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.276373 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.293529 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.310694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.334224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.358363 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.383771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.402081 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.418257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.438171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.453388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.477877 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.497484 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.514276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.533415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.556951 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.577905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.610654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.626175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.650674 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.665779 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.686988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc 
kubenswrapper[4125]: I0312 13:32:22.706939 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.723736 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.753192 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.775350 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.793727 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.814248 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.839218 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.864279 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.885592 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.886948 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.887365 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.906227 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.936575 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.960349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:22 crc kubenswrapper[4125]: I0312 13:32:22.989438 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.014988 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.025468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.025531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.025662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.025666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.025476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.025994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.026221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.026403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.026540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.026553 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.026748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.026752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.025474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.027211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.027469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027882 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.028078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.028198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.028253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027226 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.027957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.028508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.028595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.028736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.028792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.028383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.029134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.029265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.029386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.029481 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.029600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.029693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.030056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.030203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.030065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.030394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.030478 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.030589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.030629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.030662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.030794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.031076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.031217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.031328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.031458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.031583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.031631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.031667 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.032664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.031699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.027951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.031985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.033416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.032128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.032260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.032449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.032973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.031924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.033205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:23 crc kubenswrapper[4125]: E0312 13:32:23.033899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.048785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.074971 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.095758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.123998 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.150455 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.174959 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.195404 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.216800 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.235736 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.252729 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.272427 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.297359 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.314778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.340950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.369524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.390328 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.407351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.429793 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.457677 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.485275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.507366 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.888640 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:23 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:23 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:23 crc kubenswrapper[4125]: I0312 13:32:23.889923 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.025462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.025976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.025998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.026342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.026372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.026565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.026608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.026745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.025489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.027068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.027167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.027423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.027474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.027785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.028092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.028211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.025563 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.028380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.028279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.028596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.028955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.029502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.030703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.031420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.031714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.031964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.031754 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.032261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.032688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.033268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.034222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.034962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.035326 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.036468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:32:24 crc kubenswrapper[4125]: E0312 13:32:24.148123 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.886804 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:32:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:32:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:32:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:32:24 crc kubenswrapper[4125]: I0312 13:32:24.887523 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.026041 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.026417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.026568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.026677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.027099 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.027285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.027108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.027396 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.027537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.027619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.027940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.027994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.028455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.029547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.029562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.030115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.029658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.029961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.029973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.030731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.030942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.031130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.031455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.031131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.031609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.031295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.031472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.031507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.031797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.031991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.032081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.032155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.032434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.032453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.032569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.032641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.032724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.032999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.033651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.033106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.033766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.033268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.033327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.033991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.033402 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.033465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.034712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.034794 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.035165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.035303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.035400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:25 crc kubenswrapper[4125]: E0312 13:32:25.035490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.887724 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:25 crc kubenswrapper[4125]: I0312 13:32:25.888159 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.025739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026232 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.026544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026551 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.026726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.026808 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.027098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.027216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.027290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.027296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.027551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.028093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.028935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.029352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.029362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.029406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.029575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.029668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.030962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.029947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.030151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.030353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:26 crc kubenswrapper[4125]: E0312 13:32:26.030472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.889721 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:26 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:26 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:26 crc kubenswrapper[4125]: I0312 13:32:26.890147 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025449 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025682 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025726 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.025973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.026066 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.029508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.029961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.031942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.032116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.032385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.032438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.032732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.032744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.032987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.033116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.033319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.033416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.033480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.033633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.033696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.034744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.038486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:27 crc kubenswrapper[4125]: E0312 13:32:27.035511 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.887222 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:27 crc kubenswrapper[4125]: I0312 13:32:27.887452 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.025682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.026189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026405 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.026453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026496 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.026647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.026679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.027102 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.027152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.027200 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.027260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.027368 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.027380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.027474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.027518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.027645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.027720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.027776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.027794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.028252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.028649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.028909 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.028976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.029175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.029436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:28 crc kubenswrapper[4125]: E0312 13:32:28.029552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.888158 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:28 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:28 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:28 crc kubenswrapper[4125]: I0312 13:32:28.888344 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025672 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.026199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026258 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.026336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.026449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.026546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026669 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.026733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.026675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.026154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.027143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.027152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.027247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.027151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.027287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.027595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.027633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.027723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.027745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.028104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.028229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.028348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.028539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.028630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.028685 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.028732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.028915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.028957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.029035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.029272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.029314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.029756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029759 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.029910 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.029998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.030936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.031205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.150370 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.834000 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.834310 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.834348 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.834384 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.834422 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:29Z","lastTransitionTime":"2026-03-12T13:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.866146 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:29Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.874321 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.874424 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.874455 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.874495 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.874532 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:29Z","lastTransitionTime":"2026-03-12T13:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.893461 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:32:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:32:29 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:32:29 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.893648 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.914365 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:29Z is after 2024-12-26T00:46:02Z"
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:29Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.925524 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.925634 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.925665 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.925702 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.925747 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:29Z","lastTransitionTime":"2026-03-12T13:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.956284 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:29Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.970365 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.970722 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.970777 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.970948 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:29 crc kubenswrapper[4125]: I0312 13:32:29.971092 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:29Z","lastTransitionTime":"2026-03-12T13:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:29 crc kubenswrapper[4125]: E0312 13:32:29.999662 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status [...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:29Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.008309 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.008650 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.008956 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.009211 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.009458 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:30Z","lastTransitionTime":"2026-03-12T13:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.025719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.026176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.026259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.026476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.026812 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027071 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.028360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.028424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.027551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.027317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.030689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.030927 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.030969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.030984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.030472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.031166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.031278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.031412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.031545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.031660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.032585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.034383 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:30Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:30 crc kubenswrapper[4125]: E0312 13:32:30.034662 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.888600 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:30 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:30 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:30 crc kubenswrapper[4125]: I0312 13:32:30.888750 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.026078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026087 4125 util.go:30] "No sandbox for pod can be found. 
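
The run of "Error updating node status, will retry" entries that ends above with "Unable to update node status" err="update node status exceeds retry count" is the kubelet exhausting a fixed retry budget for its status PATCH (nodeStatusUpdateRetry is 5 in the upstream kubelet, which matches the number of attempts seen here). Below is a minimal sketch of that bounded-retry shape, with the PATCH stubbed out to always fail as it does in this log; the function names and structure are illustrative, not the kubelet's actual source.

    // retry.go - sketch of the bounded-retry pattern behind the log lines:
    // each failed attempt logs "will retry", and once the budget is spent
    // the caller gives up with "exceeds retry count".
    package main

    import (
        "errors"
        "fmt"
    )

    // Mirrors the upstream kubelet constant; treat the exact value as an
    // assumption for this sketch.
    const nodeStatusUpdateRetry = 5

    // tryUpdateNodeStatus stands in for the real PATCH against the API
    // server; here it always fails, as it does while the webhook's
    // serving certificate is expired.
    func tryUpdateNodeStatus() error {
        return errors.New("failed calling webhook: x509: certificate has expired or is not yet valid")
    }

    func updateNodeStatus() error {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            if err := tryUpdateNodeStatus(); err != nil {
                fmt.Printf("Error updating node status, will retry: %v\n", err)
                continue
            }
            return nil
        }
        return errors.New("update node status exceeds retry count")
    }

    func main() {
        if err := updateNodeStatus(); err != nil {
            fmt.Println("Unable to update node status:", err)
        }
    }
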
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.026282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.025571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.026489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.026965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.027076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.027747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027806 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.028128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.027224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.028212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.028332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.027498 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.027602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.028471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.028137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.028677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.028759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.028791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.029090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.029237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.029535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.029731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.030161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.030308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.030383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.030747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.030799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.030988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.031180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.031219 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.031357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.031471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.031674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.031899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.032092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.032287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.032380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.032430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.032519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.032942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.033158 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.033310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:31 crc kubenswrapper[4125]: E0312 13:32:31.033505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.425253 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.425494 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.425543 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.425594 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.425651 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.887765 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:32:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:32:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:32:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.888092 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.888177 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.890181 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a"} pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" containerMessage="Container router failed startup probe, will be restarted" Mar 12 13:32:31 crc kubenswrapper[4125]: I0312 13:32:31.890289 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" containerID="cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a" gracePeriod=3600 Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.026045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.026269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.028176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.028301 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.028362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.028264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.028315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.029488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.030534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.031152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029665 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.031969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.032283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.032586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.029741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.029900 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.029996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.030070 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.035461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.030097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.030156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.030255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.030329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.036448 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.031443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.033100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:32 crc kubenswrapper[4125]: E0312 13:32:32.035766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.051414 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.068732 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.088690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.110137 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.135327 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed 
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.159658 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.177341 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.209964 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.228319 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.246226 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.268176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.284029 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.304062 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.327917 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.345289 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.363507 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.383112 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.405380 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.423284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.447517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.480572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.498799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.521612 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.542353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.560699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.587226 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.610066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.629284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.648218 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.688382 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.708980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.732683 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.756271 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.776174 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.792557 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.809226 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.826393 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.847553 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.864452 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.890673 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.909589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.923585 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.946691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.967883 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:32 crc kubenswrapper[4125]: I0312 13:32:32.990796 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.005251 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.020031 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025511 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025857 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025899 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.025953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.025959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026104 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026167 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026657 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026773 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.026910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.026986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.027072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.027084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.027112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.027482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027896 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.027995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.028032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028122 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.028589 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028897 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.028966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.029063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.029123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:33 crc kubenswrapper[4125]: E0312 13:32:33.029197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.036254 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.051306 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.070763 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.101358 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.118781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.139426 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.157330 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.177606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.252561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.285598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.315937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.334938 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.364322 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c4
7358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.387771 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba89
4f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.407742 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.434406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.456118 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.481613 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.508059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:33 crc kubenswrapper[4125]: I0312 13:32:33.528205 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.025331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.026297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.026596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.027170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.027378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.027426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.027610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.027628 4125 scope.go:117] "RemoveContainer" containerID="bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.027663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.027794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.028112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.028154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.028376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.028675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.028752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.028939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.029153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.029938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.030190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.030357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.030489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.030672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.030928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.031110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.031276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.031980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:34 crc kubenswrapper[4125]: E0312 13:32:34.152626 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.524348 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/3.log" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.524522 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb"} Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.555551 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.591943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.629446 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.674381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.725496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.756610 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.785639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.809318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.832976 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.863862 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.889979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.922635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.954059 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.975425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:34 crc kubenswrapper[4125]: I0312 13:32:34.995654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.021113 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.024680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.024727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.024776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.024912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.024970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025775 4125 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.025931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.025953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026120 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026122 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026289 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.026889 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026929 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.026978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.027179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.027225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.027305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027666 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.027987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.028094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.028177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:35 crc kubenswrapper[4125]: E0312 13:32:35.028243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.038544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.054210 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.069284 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.085970 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.110535 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.125242 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.140934 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.161871 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.178634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.192660 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.210427 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.229175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.249345 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.264110 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.284084 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.300254 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.318420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.336510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.355591 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.374305 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.401966 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.418063 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.439468 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.458329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.479177 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.497639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.516627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.551788 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.572662 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.596626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.621282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d46
93f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.639074 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.658802 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.679329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.699070 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.719878 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.740925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.756774 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.780225 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.814103 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed 
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.839189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.861228 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.886970 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.913408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.947620 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.972747 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:35 crc kubenswrapper[4125]: I0312 13:32:35.996257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:35Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.014026 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025398 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025586 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.025646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.025383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.026154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.026356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026775 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026890 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.026909 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.026925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.027191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.027774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.027963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.028214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.028289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.028401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.028534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.028670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.028771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.028950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.029123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.033142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:36 crc kubenswrapper[4125]: E0312 13:32:36.033182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.048095 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.061767 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:36 crc kubenswrapper[4125]: I0312 13:32:36.076924 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:36Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.024973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.026681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025355 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025438 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.025657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026072 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:37 crc kubenswrapper[4125]: I0312 13:32:37.026226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.027559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.027685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.029395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.029733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.030105 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.030281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.030452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.030725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.030980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.031323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.031602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.031792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.032361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.032580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.032975 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.033431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.033594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.033752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.034078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.034251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.034397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.034555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.034668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.034940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.035169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.035324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.035459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.035591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.035702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.035951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:37 crc kubenswrapper[4125]: E0312 13:32:37.036178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.025939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026185 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026458 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.026476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.026740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.026992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.027069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.027224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.027371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.027421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.027480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.027630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.027700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.027786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.027936 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.027960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.027953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.028178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.028276 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.028442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.028625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.028964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.029242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.029419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.029947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.030273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.030381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.030792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:38 crc kubenswrapper[4125]: I0312 13:32:38.033621 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:32:38 crc kubenswrapper[4125]: E0312 13:32:38.034525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.026577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.026639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.026730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.026785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.026918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.026946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.027058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027082 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.027350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.027692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027897 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027928 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.027980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.028098 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028168 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.028370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.028545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.028788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.028920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.029074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.029208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.029404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.032227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.032307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.032425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.032462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.032517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.032623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.032628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.032734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.032939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.032991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.033051 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.033191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.033224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.033291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.033372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.033720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.033804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.035591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.035983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.036214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.036440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:39 crc kubenswrapper[4125]: I0312 13:32:39.036555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.036661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.036764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.036941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.037152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.037335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.044556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.051795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:39 crc kubenswrapper[4125]: E0312 13:32:39.160541 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.025631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.026226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.026318 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.026400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.026517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.026579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.026673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.026788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.027445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.027497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.027688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.028182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.028297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.028433 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.028626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.028714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.029151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.029198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.029379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.029605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.029680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.030131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.030172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.030271 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.030530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.030524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.031473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.032151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.032599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.033080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.033428 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
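The "Error syncing pod, skipping" entries above are all rejections of the same readiness gate: the container runtime keeps reporting NetworkReady=false until a CNI network configuration appears in /etc/kubernetes/cni/net.d/. The Go sketch below is illustrative only; the real probe is performed by the container runtime through libcni, and everything here except the directory path and the error wording quoted from the log is an assumption.

package main

// Illustrative sketch, not kubelet or CRI-O source: approximates the
// "is any CNI network config present yet?" probe behind the
// NetworkPluginNotReady errors logged above.
import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether confDir holds at least one file with
// an extension a CNI loader would treat as a network configuration.
func cniConfigPresent(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Directory path taken from the log messages above.
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("network is not ready: No CNI configuration file in /etc/kubernetes/cni/net.d/")
	}
}

Until that probe succeeds, every pod that needs a network sandbox is requeued, which is why the same error repeats once per pod rather than once overall.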
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.322108 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.322205 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.322233 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.322267 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.322302 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:40Z","lastTransitionTime":"2026-03-12T13:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.338643 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.346497 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.346696 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.346806 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.346984 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.347130 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:40Z","lastTransitionTime":"2026-03-12T13:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.379949 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{... status patch payload identical to the 13:32:40.338643 attempt above; omitted ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:40Z is after 2024-12-26T00:46:02Z"
expired or is not yet valid: current time 2026-03-12T13:32:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.389337 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.389762 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.390299 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.390548 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.390939 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:40Z","lastTransitionTime":"2026-03-12T13:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.415192 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:40Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.422736 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.423370 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.423651 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.424316 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.424559 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:40Z","lastTransitionTime":"2026-03-12T13:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.445910 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{... status patch payload identical to the 13:32:40.338643 attempt above; omitted ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.455356 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.455457 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.455489 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.455526 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:40 crc kubenswrapper[4125]: I0312 13:32:40.455565 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:40Z","lastTransitionTime":"2026-03-12T13:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.488780 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:40Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:40 crc kubenswrapper[4125]: E0312 13:32:40.488937 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.025649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.025944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.026072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.026272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.026330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.026425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.026505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.026770 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.026785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027084 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.025686 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027257 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.027344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.026790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.027582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.027755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.027933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.028087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.028117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.028308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.028384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.028518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.028618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.028650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.028720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.029157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.029302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.029328 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.029704 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.029779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.029779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.030256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.030523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.030744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.030931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.030941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.031445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.031528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.031603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.031714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.031931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.032071 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.032152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.032288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.032433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.032536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.032651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:41 crc kubenswrapper[4125]: I0312 13:32:41.032748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.033128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.033268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.033444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.033587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.033948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.034240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.034638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:41 crc kubenswrapper[4125]: E0312 13:32:41.035116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.025413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.025532 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.025464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.026202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.026291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.026480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.026564 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.026667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.026713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.026783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.026973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.027087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.027159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.027243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.027284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.027390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.027492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.027589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.027681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.027729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.027802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.027939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.028143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.028189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.028237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.028357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.028996 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.029141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.029345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.029591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.029803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:42 crc kubenswrapper[4125]: E0312 13:32:42.030290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.056434 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.079237 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.103801 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.156524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f
9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.189543 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.227639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.250996 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.273178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.297443 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.332580 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.357684 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.386147 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.409717 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.443232 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.473239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.503537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.534507 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.569154 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.611443 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.641661 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.678529 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.707370 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.736639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.763705 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.794799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.821702 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.850186 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.870272 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.891370 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.907898 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.923938 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.947919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.965754 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:42 crc kubenswrapper[4125]: I0312 13:32:42.988690 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.011104 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025347 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.025530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025537 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.025779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.025265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.026125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.026209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.026127 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.026211 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.027163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.027403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.027919 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.028081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.028214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.028297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.028329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.028388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.028388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.029214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.032244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.032621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.033717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.034506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.035150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.035433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.035730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.036107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.036406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.036577 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.037554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.038063 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.038737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.038799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.039213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.039362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.041174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.041725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.042208 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.042264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.042504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.042591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.042730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.043344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.044185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.044345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.044754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.044955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.044967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.045132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.045285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.045429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.045639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.045803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.045997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.045944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.046151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.046310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:43 crc kubenswrapper[4125]: E0312 13:32:43.046407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.067659 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.088797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.107932 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.131439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.154940 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.174408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.194517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.218087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.286197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.314739 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.347258 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.379558 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.414634 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.438627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.454257 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.468144 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.482618 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.500740 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.514785 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.537477 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.562943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25
ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.585484 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.611776 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.636799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.661329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.684084 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.704787 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.734158 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.772677 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed 
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.804286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:43 crc kubenswrapper[4125]: I0312 13:32:43.835282 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.025996 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.026424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026096 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.026694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.026092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.027104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.027180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.027318 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.027194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.027430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.027473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.027648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.027723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.027959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.029337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.028095 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.028326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.028548 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.028618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.028979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.029127 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.029239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.029962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:44 crc kubenswrapper[4125]: I0312 13:32:44.030345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.030686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.030789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.031059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:44 crc kubenswrapper[4125]: E0312 13:32:44.162952 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.024766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.025338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025109 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.025558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025131 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.025635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.030904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.025634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.032678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.032939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.033275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.033402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.033445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.033917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.030929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.030967 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.034198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.034400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031126 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.031170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.037419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031379 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.031721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.031920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.032103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.032139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.032243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.032500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.035363 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.035436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.035467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.035499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.035528 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.035617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:45 crc kubenswrapper[4125]: I0312 13:32:45.035686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.035800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.036619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.036971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.039935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.040407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.040724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.041105 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.042214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.042218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.042764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.043281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.043573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.043936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.044166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.044376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.044601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.045216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:45 crc kubenswrapper[4125]: E0312 13:32:45.045401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.026340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.026686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027332 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.027720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.027971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.028299 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.028416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.028589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.028664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.028691 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.028738 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.028775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.029060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.029160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.029183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.029266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.029365 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.029503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.029638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.029688 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.029985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.030234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.030439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.030575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.030806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:46 crc kubenswrapper[4125]: I0312 13:32:46.031938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:46 crc kubenswrapper[4125]: E0312 13:32:46.032635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.025619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026292 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.026720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.025969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026111 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.027208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026330 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.025654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.026369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.027490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.027739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.027961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.028256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.028426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.028669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.028747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.028762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029001 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.029133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.028968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.029374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.029554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.029630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.029801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.030102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.030135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.030336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.031000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.031078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.031202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.031255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.031399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:47 crc kubenswrapper[4125]: I0312 13:32:47.031529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.031711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.032161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.032382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.032611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.032942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.033362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.033784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.033962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.034533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.035159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.035256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.035435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.035783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:47 crc kubenswrapper[4125]: E0312 13:32:47.036342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026452 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.026490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026506 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.026770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026923 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.026968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.027157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.027199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.027424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.027431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.027517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.027613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.027735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.027966 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.028198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.028357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.028460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.028594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.028657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.028948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:48 crc kubenswrapper[4125]: I0312 13:32:48.029065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.029237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.029342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.029504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.029684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.029798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:48 crc kubenswrapper[4125]: E0312 13:32:48.030108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.025927 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026441 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.026715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.027254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.026106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.027273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.027338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.027345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.027402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.027492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.027622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.027736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.027991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.028158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.028191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.028594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.028670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.028696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.028960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.029163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.029190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.029352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.029453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.029473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.029563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.029619 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.029702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.029783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.030003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.030137 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.030455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.030758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.031195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.031334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.031591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.031718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.031781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.032215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.032345 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.032431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.032526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.032569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.032658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.032724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.032745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.033123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.033295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:49 crc kubenswrapper[4125]: I0312 13:32:49.033389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.033744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.034370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.034601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.034763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.034939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.035470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:49 crc kubenswrapper[4125]: E0312 13:32:49.169608 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.026588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027101 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027191 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.027521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.029325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.029592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.029649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.029783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.030294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.030464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.030624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.030767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.031056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.031222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.031353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.031695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.032163 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.032449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.032697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.581339 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.581766 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.581949 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.582056 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.582124 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:50Z","lastTransitionTime":"2026-03-12T13:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.604801 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.614944 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.615146 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.615181 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.615218 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.615270 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:50Z","lastTransitionTime":"2026-03-12T13:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.641608 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.653896 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.653972 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.653993 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.654059 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.654102 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:50Z","lastTransitionTime":"2026-03-12T13:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.672912 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.679114 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.679203 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.679227 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.679254 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.679290 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:50Z","lastTransitionTime":"2026-03-12T13:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.698557 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.706350 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.706433 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.706454 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.706482 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:32:50 crc kubenswrapper[4125]: I0312 13:32:50.706518 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:32:50Z","lastTransitionTime":"2026-03-12T13:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.725507 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:32:50Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:50 crc kubenswrapper[4125]: E0312 13:32:50.725581 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.027690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.027933 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.028341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.028717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.028890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.029273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.029330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.029280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.029514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.029514 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.029565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.029590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.029769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.029788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.030213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.030228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.030265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.030411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.030531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.030542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.030614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.030630 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.030749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.031274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.031110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.031447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.031512 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.031652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.031737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.032219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.032554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.032736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.033235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.033556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:51 crc kubenswrapper[4125]: I0312 13:32:51.033629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.033936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.034399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.034645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.034787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.035138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.035300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.035451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.035596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.035772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.036070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.036450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.036537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.036699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:51 crc kubenswrapper[4125]: E0312 13:32:51.037065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.026919 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.027115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.027284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.027422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.027575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.029206 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.029476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.029883 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.029906 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.029962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.030206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.031388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.031501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.031717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.031802 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.032109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.030597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.030744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.032305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.032474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.032622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.032750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.033060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.033229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.033367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.033567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:52 crc kubenswrapper[4125]: E0312 13:32:52.033806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.094420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0
985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.134067 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.170121 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.200373 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.229470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.272688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.308766 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.359666 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.387784 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.417608 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.442688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.470310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.495538 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.534961 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.571358 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.614089 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.647975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.676152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.710475 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.736390 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.766997 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.800437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.826193 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.861564 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.894151 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.924307 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.954083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:52 crc kubenswrapper[4125]: I0312 13:32:52.980152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.012717 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.025989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026148 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026370 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026468 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.025933 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.026957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.027588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.029120 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.029431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.029717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.029780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.030110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.030307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.030514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.031201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.031661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.032381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.032421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.032616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.032800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.033223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.033640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.033905 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.034176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.034346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.034527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.034996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.034779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.035221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.035374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.035532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.035651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.035925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.036061 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.036194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.036308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.036399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:53 crc kubenswrapper[4125]: E0312 13:32:53.036482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.043461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.076214 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.113916 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.140639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.164400 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.188178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.214659 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.243645 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.277494 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.350887 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.377678 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.413640 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.436308 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.465075 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.498246 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.532521 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.574279 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.613942 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.651185 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.677399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.706152 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.736264 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.780304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.809769 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.846703 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.870303 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.897989 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.922975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.948773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.966483 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:53 crc kubenswrapper[4125]: I0312 13:32:53.993212 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.023066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25
ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.025512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026139 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.026323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.026437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.026509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.026784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025927 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.027185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025945 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.025985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.026052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026074 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.026112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.028916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.029347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.029787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.030133 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.030261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.030332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.030410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.030783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.031325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.043551 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.061312 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.081357 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.115088 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.143048 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:54 crc kubenswrapper[4125]: E0312 13:32:54.172122 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:32:54 crc kubenswrapper[4125]: I0312 13:32:54.174972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:32:54Z is after 2024-12-26T00:46:02Z" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.027561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.027710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.027996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.028148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.028281 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.028473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.028533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.028581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.028744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.028938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.029091 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.029152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.029254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.029286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.029264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.029391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.029409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.029458 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.029918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.030085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.030111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.030304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.031503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.031659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.031695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.031765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.031795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.032165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.032406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.032507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.032647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.032968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.033472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.033625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.033782 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.034079 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.034941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.034981 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.035201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.035528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.034989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.035157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.036510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.036576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.036637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.036694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.036514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.036451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.035390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.036988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.037391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.037762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.038169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.038359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.038542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:55 crc kubenswrapper[4125]: I0312 13:32:55.039403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.039581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.039493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.039698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.039922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.040158 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.040307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.040485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:55 crc kubenswrapper[4125]: E0312 13:32:55.040621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.025520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.025915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.025980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.026085 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.025985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026120 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.025998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.026274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026433 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.026626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.026718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.026795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.027114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.027198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.027380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.027472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.027570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.027981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:56 crc kubenswrapper[4125]: I0312 13:32:56.028095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.028045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.028197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.028272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.028336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.028425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:56 crc kubenswrapper[4125]: E0312 13:32:56.029594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.026359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.026502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.026662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.026686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.026791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027376 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027305 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.027974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.028915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.029078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.029168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.029232 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.029538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.029603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.029620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.029948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.030160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.030205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.030614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.030692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.030623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028941 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.031235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.028942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.031699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.031734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032700 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.032768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.033096 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.033121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.033123 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.033143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:57 crc kubenswrapper[4125]: I0312 13:32:57.033243 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.033337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.033490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.033620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.034299 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.034459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.034520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.035152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.034546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.035086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.035138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.035243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.035384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:57 crc kubenswrapper[4125]: E0312 13:32:57.035555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026522 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026546 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.026682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.027245 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.027501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.027600 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.027611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.027742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.027747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.027978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.028326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.028373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.028459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.028640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.028952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.028972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.029164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.029277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.029365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.029579 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:32:58 crc kubenswrapper[4125]: I0312 13:32:58.029794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.030215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.030552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.030691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.030963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.031456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:32:58 crc kubenswrapper[4125]: E0312 13:32:58.031659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.025496 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.025915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.026233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.026329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.026407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.026605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.026729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.026985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.027140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.027205 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.027384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.027491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.027599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.027723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.028060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.028250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.028766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.029157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.029577 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.029745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.031070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030267 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.031260 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030530 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030568 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.031636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:32:59 crc kubenswrapper[4125]: I0312 13:32:59.030993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.031726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.032281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.032563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.032669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.033086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.033273 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.033444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.033574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.033729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.034002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.034258 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.034461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.034960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.035119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.035287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.035514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.035780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.036105 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.036966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.038707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:32:59 crc kubenswrapper[4125]: E0312 13:32:59.174510 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.025931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.026119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.026352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.026380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.026482 4125 util.go:30] "No sandbox for pod can be found. 
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.026655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.026774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.026995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.027156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.027172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.027258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.027307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.027438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.027503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.027646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.027722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.027756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.027995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.028337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.028499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.028642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.028674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.028962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.029247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.029316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.029371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.029430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.029783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.029969 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.030151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.030235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.030350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.903348 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.903502 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.903542 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.903582 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.903631 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:00Z","lastTransitionTime":"2026-03-12T13:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.946575 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:00Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.955611 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.955716 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.955752 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.955791 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.955935 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:00Z","lastTransitionTime":"2026-03-12T13:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:00 crc kubenswrapper[4125]: E0312 13:33:00.988254 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:00Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.997528 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.997716 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.997760 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.997924 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:00 crc kubenswrapper[4125]: I0312 13:33:00.997999 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:00Z","lastTransitionTime":"2026-03-12T13:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.025322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.025595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.025779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.025988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.026184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.026362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.026580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.026613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.026726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.027149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.027205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.027415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.027556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.027594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.027702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.027418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.028072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.028462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.028662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.028801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.029174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.029269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.029675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.029994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.030157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.030176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.030260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.030371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.030574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.030611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.030705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.030749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.030967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.031208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.031284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.031627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.031656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.031746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.032212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.032432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.032634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.032710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.033000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.035111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.036189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.036295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.040121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.041649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.042581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.043524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.044373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.045126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.052201 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:01Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.061502 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.062001 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.062197 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.062237 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.062283 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:01Z","lastTransitionTime":"2026-03-12T13:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.094458 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:01Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.110640 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.110955 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.110992 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.111088 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:01 crc kubenswrapper[4125]: I0312 13:33:01.111144 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:01Z","lastTransitionTime":"2026-03-12T13:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.149654 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:01Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:01 crc kubenswrapper[4125]: E0312 13:33:01.150158 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.025177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.025270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.025219 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.025574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.025675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.025776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.026058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.026308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.026450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.026626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.026715 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.026892 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.027137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.027343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.027577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.027768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.028146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.028239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.028378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.028578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.028744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.029402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.029637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.030103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.030188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.030361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.030465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.030673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.030724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.030953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.031219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:02 crc kubenswrapper[4125]: E0312 13:33:02.031754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.053601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.082164 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.108782 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.138597 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.163549 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.183987 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.201606 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.221115 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.239484 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.260517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.278288 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.302291 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.324644 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.357561 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.380352 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.413701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.441682 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.460879 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.485693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.507732 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.526087 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.549271 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.587235 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.609571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.632196 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.651079 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.673151 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.698728 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.730986 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.754513 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.778123 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.801585 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.829715 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.852379 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.879929 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.931488 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.976788 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:02 crc kubenswrapper[4125]: I0312 13:33:02.992338 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:02Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.011556 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.024892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025159 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025339 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025383 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.025854 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.025977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.026089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026118 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.026125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026204 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.026296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.026463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.026599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.026647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.026871 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.027295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.027324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.027389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.027960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.028137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.028262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.028332 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.028423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.028506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.028647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.028761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029075 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.029180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:03 crc kubenswrapper[4125]: E0312 13:33:03.029744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.037197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.053351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.069521 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.089183 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.107943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.125423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.149356 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.166882 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.182208 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.201230 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.223609 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.246481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.270431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.295418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.323746 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.341093 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.362719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.381712 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.405983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.423699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.441001 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.457445 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.476639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.502759 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.523576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.545932 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.564481 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:03 crc kubenswrapper[4125]: I0312 13:33:03.583329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:03Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.026354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.026482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.026579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.026931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.026991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027116 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.027348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.027593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.027922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.027963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.028099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.028322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.028559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.028681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.028746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.028803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.029148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.029420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.029493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.029580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.029704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.029739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.030217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.030399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.030596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.031513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.032072 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:04 crc kubenswrapper[4125]: I0312 13:33:04.035132 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.037397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482"
Mar 12 13:33:04 crc kubenswrapper[4125]: E0312 13:33:04.177472 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026291 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.026656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.026773 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026111 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.026925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.027128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.027223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026236 4125 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.027524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.026340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.025694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.027732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.027785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:05 crc kubenswrapper[4125]: I0312 13:33:05.027943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.028211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.028301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.028515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.028788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.029235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.029296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.029429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.029973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.030060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.030060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.030603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.030721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.030908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031870 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.032106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.031767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:05 crc kubenswrapper[4125]: E0312 13:33:05.032200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.024978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.025079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.025126 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.025382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.025391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.025410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.025690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.025718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.025788 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.026074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.026199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.026315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.026385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.026480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.026585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.026723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.026987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.027164 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.027289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.027346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.027608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.027679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.028104 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.028097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.028309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.028456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.028517 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.028632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:06 crc kubenswrapper[4125]: I0312 13:33:06.028687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.028960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.029188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:06 crc kubenswrapper[4125]: E0312 13:33:06.029310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.025759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.025909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026123 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026135 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026217 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.026240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.026383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.025799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.026554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026621 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.026726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.026908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.026991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.026991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027099 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.027139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027217 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.027394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.027584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.027852 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.027969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.028454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.028508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.028593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.028954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.027220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.029177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.029234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029459 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:07 crc kubenswrapper[4125]: I0312 13:33:07.029583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029890 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.029983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.030115 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.030338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:07 crc kubenswrapper[4125]: E0312 13:33:07.030462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.024906 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.025303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.025590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025703 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.025741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.026109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.026270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.026379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.026416 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.026447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.026944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.027139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:08 crc kubenswrapper[4125]: I0312 13:33:08.027165 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.027398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.027542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.027671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.028088 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.028282 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.028442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.028571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.028770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.029104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:08 crc kubenswrapper[4125]: E0312 13:33:08.029238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025286 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025509 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.025551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.025927 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.026095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.026136 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.026175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.026361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.026507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.026663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.026805 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.026968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.027115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.027261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.027262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.027404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.027586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.027690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028129 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.028328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.029922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.030069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.028468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.028599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.030309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.028776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.030530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.030698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.028977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.030966 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.029003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.029088 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.028998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.029108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.029126 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.029194 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.029372 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.029447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.032673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.032947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.033188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.033642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034163 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.034411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.035132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:09 crc kubenswrapper[4125]: E0312 13:33:09.178381 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.228117 4125 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 12 13:33:09 crc kubenswrapper[4125]: I0312 13:33:09.228238 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.025422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.025541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.025669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.025680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.026048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.026102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.026159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.026300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.026462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.026523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.026621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.026716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.026895 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.027091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.027156 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.027235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.027324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.027367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.027506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.027564 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.027761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.028006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.028096 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.028151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.028246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.028351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.028495 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.028582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.028675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.028758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:10 crc kubenswrapper[4125]: I0312 13:33:10.029097 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:10 crc kubenswrapper[4125]: E0312 13:33:10.029276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.025314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.025380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.025549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.025564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.025631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.025745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.025770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.025920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.025977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026091 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.026174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.026334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.026457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.026500 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.026690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.026799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.027008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.027199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.027324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.027375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.027457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.028318 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.028481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.028538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.028633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.028731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.028922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.028981 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.029123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.029180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.029265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.029345 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.029450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.029534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.029631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.029676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.029771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.029939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.030081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030238 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.030242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.030360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.030550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030672 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.030763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.030904 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.030990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.031147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.031233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.031290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.031361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.031443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.031524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.264474 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.264536 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.264556 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.264578 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.264608 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:11Z","lastTransitionTime":"2026-03-12T13:33:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.280892 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.286331 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.286414 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.286435 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.286463 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.286491 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:11Z","lastTransitionTime":"2026-03-12T13:33:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.315510 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.323249 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.323492 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.323587 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.323705 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.323872 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:11Z","lastTransitionTime":"2026-03-12T13:33:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.339456 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:11Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.347242 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.347283 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.347297 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.347316 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.347339 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:11Z","lastTransitionTime":"2026-03-12T13:33:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.363121 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.370571 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.370637 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.370653 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.370674 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:11 crc kubenswrapper[4125]: I0312 13:33:11.370696 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:11Z","lastTransitionTime":"2026-03-12T13:33:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.388670 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:11Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:11 crc kubenswrapper[4125]: E0312 13:33:11.389067 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.025241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.025592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.027348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.027290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.027638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.027739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.026394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.025629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.029357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.032497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.033719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.034706 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.037085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.037556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.038484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.039489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.040561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.041139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.041609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.042056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.042412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.042797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.043065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.048201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.048374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.048383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.048441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.048765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.050111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.050693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.050993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:12 crc kubenswrapper[4125]: E0312 13:33:12.052091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.080516 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.110346 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.133600 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.151331 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.177331 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.207799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver 
openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.228178 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.254349 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\"
,\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.278908 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.296966 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.368790 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe 
csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.405576 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.427693 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.444904 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.459769 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.474403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
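The root cause repeated in every one of these records is the same: the webhook's serving certificate expired on 2024-12-26T00:46:02Z, and the node clock reads 2026-03-12, so verification fails during the TLS handshake before any patch is sent. A sketch of the check that trips, assuming a PEM-encoded certificate on disk (the path argument is a placeholder, and this is stdlib Go, not the kubelet's actual code path):

```go
// A sketch, not kubelet code: parse a PEM certificate and apply the same
// validity-window check that produces "certificate has expired or is not
// yet valid".
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: checkcert <path-to-pem>") // path is a placeholder
		os.Exit(2)
	}
	raw, err := os.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	fmt.Println("NotBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("NotAfter: ", cert.NotAfter.Format(time.RFC3339))
	fmt.Println("Now:      ", now.Format(time.RFC3339))
	if now.After(cert.NotAfter) || now.Before(cert.NotBefore) {
		// The same condition crypto/tls reports as
		// "x509: certificate has expired or is not yet valid".
		fmt.Println("verification would fail: outside validity window")
	}
}
```

Against the certificate referenced in this log, Now (2026-03-12) lands well past NotAfter (2024-12-26), so the expired branch fires, matching the error text appended to every failed patch.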
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.490573 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.508082 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.528982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.547654 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.575868 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.592900 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.609589 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.630407 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.651797 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
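The kube-multus container's own earlier failure, quoted in the lastState just above, is unrelated to the certificate: it timed out waiting for the OVN readiness-indicator file (the "pollimmediate error" wording comes from apimachinery's wait.PollImmediate). A stdlib approximation of that check, with the path copied from the log and the interval and timeout values assumed for illustration:

```go
// A sketch of a readiness-indicator file check of the kind multus logs above,
// approximated with stdlib polling instead of k8s.io/apimachinery wait helpers.
package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

func waitForFile(path string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if _, err := os.Stat(path); err == nil {
			return nil // indicator file exists: the default network is ready
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	// Path copied from the log entry; interval/timeout are assumptions.
	err := waitForFile("/host/run/multus/cni/net.d/10-ovn-kubernetes.conf",
		time.Second, 45*time.Second)
	fmt.Println("result:", err)
}
```

Per the same record, the file eventually appeared: the container restarted at 13:32:34 and has been Ready since, so only the status patch reporting that recovery is failing.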
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.669376 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.692906 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.718649 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.738116 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.759286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.780517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.819732 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.838583 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
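Each failed patch bottoms out in the same HTTPS POST to the admission webhook at 127.0.0.1:9743. A sketch of that call shape in stdlib Go (an empty placeholder body stands in for a real AdmissionReview; against an expired serving certificate the TLS handshake itself fails, which is where the x509 error in these records comes from):

```go
// A sketch of the call shape behind 'Post "https://127.0.0.1:9743/pod?timeout=10s"';
// not the actual webhook client, which also sends a signed AdmissionReview.
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 10 * time.Second}
	// Endpoint taken from the log; `{}` is a placeholder body.
	resp, err := client.Post("https://127.0.0.1:9743/pod?timeout=10s",
		"application/json", bytes.NewReader([]byte(`{}`)))
	if err != nil {
		// With an expired serving certificate this fails during the TLS
		// handshake, before any HTTP exchange; the error chain typically
		// ends in x509.CertificateInvalidError (Reason: x509.Expired).
		fmt.Println("webhook call failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```

The ?timeout=10s query parameter and the 10-second client timeout mirror the URL recorded in every entry; since verification fails immediately, the calls never get close to that deadline.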
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.862179 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.884159 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.904574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.926525 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.950385 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.969471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:12 crc kubenswrapper[4125]: I0312 13:33:12.989531 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:12Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.007790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.024787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.024788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.024966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.024996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.025198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.025296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.025349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.025402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.025362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.025527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.025564 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.025669 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.025746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.025990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.026113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.026193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.026268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.026324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.026532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.026635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.028455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.028492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.028577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.028795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.029503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029734 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029792 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029873 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.030042 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.029359 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.030377 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:30:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.031495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.031715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.031859 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.031995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.032960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:13 crc kubenswrapper[4125]: E0312 13:33:13.033803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.048635 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.070091 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.089508 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\
",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.110203 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.131431 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.148877 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.167248 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.192244 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.214755 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.239638 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.261036 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.283676 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.310688 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.332658 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.360718 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.415522 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.444672 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.469007 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.487355 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.504313 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.526334 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.542331 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.555437 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.579605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:13 crc kubenswrapper[4125]: I0312 13:33:13.606389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:13Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026414 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.026751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.026940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.026998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.027573 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027894 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.027973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:14 crc kubenswrapper[4125]: I0312 13:33:14.028047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.028235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.028560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.028932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.028947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.028994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.029130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.029225 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:14 crc kubenswrapper[4125]: E0312 13:33:14.179983 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.027425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025161 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.027646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.027767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.028179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.028447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025344 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.028572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.028661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.028769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025470 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.028958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.029101 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025519 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025536 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.029402 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:15 crc kubenswrapper[4125]: I0312 13:33:15.025662 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.025894 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.026270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.026344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.029555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.029620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.029727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.030410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.030643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.031139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.031440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.031509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.031605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.031946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.032914 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.032931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.032980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.033200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.033229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.033405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.033512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:15 crc kubenswrapper[4125]: E0312 13:33:15.033539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.025654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.025719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.026234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.026242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.026672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.026686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.027112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.027227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.027341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.027395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.027516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.027522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.027674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.027773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.028093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.028133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.028255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.028399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.028650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.028791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.029297 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.029330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.029472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.029591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.031150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.031319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.031417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.031650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.031688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.032063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.032424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.033321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:16 crc kubenswrapper[4125]: I0312 13:33:16.033566 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:33:16 crc kubenswrapper[4125]: E0312 13:33:16.035437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.025963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.026532 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026718 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.026994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027117 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.027180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.027308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027385 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.027467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.027566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.027793 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.027962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.028004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.028107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.028147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.028292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.028514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.028918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.029194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.029353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.029518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.029703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.029804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.030192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.030445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.030691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.030941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.030970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.031124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.031287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.031513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.031703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.031998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.032221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.032417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.032743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.033368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:17 crc kubenswrapper[4125]: I0312 13:33:17.033400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.033540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.033573 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.033729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:17 crc kubenswrapper[4125]: E0312 13:33:17.034093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.025774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.025978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026001 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026118 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.026574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.026770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026191 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.026223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027886 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.027968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.028014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.028082 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.028211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.028312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.028424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.028648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.028776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:18 crc kubenswrapper[4125]: E0312 13:33:18.029103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.816222 4125 generic.go:334] "Generic (PLEG): container finished" podID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerID="fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a" exitCode=0
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.816371 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerDied","Data":"fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a"}
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.817319 4125 scope.go:117] "RemoveContainer" containerID="a47654c281ff834a47f517fb058b0e6e68633fabb864a21addeb41d256beeeec"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.818779 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6"}
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.847363 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.866327 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.883207 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.883292 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.884135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.891330 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:18 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:18 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.891420 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.909976 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.942135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.960923 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.981121 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:18 crc kubenswrapper[4125]: I0312 13:33:18.998041 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:18Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.016680 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.024929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.024966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025332 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025870 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.025947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.025951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026098 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026395 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026767 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.026878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.026967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.027074 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.027144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.027199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.027313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.027471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.027606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.027717 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.027772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.027928 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.027986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.028080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.028188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.028234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.028296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.028384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.028454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.028559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.028693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.028880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.029069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.029237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.029374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.029503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.029642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.039369 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.057403 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.074338 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.096593 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":4,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.119925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.138445 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.166294 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: E0312 13:33:19.181579 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.202952 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.239194 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.255411 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.271937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.286756 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.300114 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.314691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.330480 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.348155 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.371395 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.393790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.416765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.436413 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.458249 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.486425 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.517251 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.547758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\
",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.582890 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.609132 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.639569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.673103 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.711708 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.740299 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.760424 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.785350 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.806598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.844578 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.860472 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.883085 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.886538 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.887077 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.909353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.943679 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:19 crc kubenswrapper[4125]: I0312 13:33:19.976656 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"
containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 
13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.001466 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:19Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.022301 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026091 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.026167 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.027308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.027642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.027952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.028138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.028336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.028487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.028752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.028926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.029411 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.029132 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.029301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.029765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.030159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.030374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.030535 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.030594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.030994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.031596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.031706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.032087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.032423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.032586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.049923 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.069006 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.089674 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.112731 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to 
decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.145108 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.169964 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.186972 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.208748 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.229342 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.254052 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.280500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.306574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.330998 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.356120 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.385001 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.407578 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.432176 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.837801 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/4.log" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.839792 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/3.log" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.840059 4125 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb" exitCode=1 Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.840608 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb"} Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.840730 4125 scope.go:117] "RemoveContainer" containerID="bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.842247 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb" Mar 12 13:33:20 crc kubenswrapper[4125]: E0312 13:33:20.843756 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.884304 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.886308 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.886450 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.917131 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.948793 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:20 crc kubenswrapper[4125]: I0312 13:33:20.970601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:20Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.022461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.025741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.025785 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.025987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.026150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.026232 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.025794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.025942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.026381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.026217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.026410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.027093 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.027423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.027482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.027552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.027615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.027686 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.027723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.027790 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.027910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.027310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.027996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.028070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.028291 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.028321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.030452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.030698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.030771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.031587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.032083 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.032699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.032729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.033008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.033468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.034157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.034312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.034422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.034457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.038409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.038449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.038652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.038981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.039171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.039528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.040202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.040502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.040757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.040951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.041462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.041618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.046097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.047503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.048003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.048342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.048780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.049116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.049122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.049310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.049913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.050043 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.050130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.050307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.050504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.050707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.053336 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.076994 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.097671 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.131388 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.158691 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.183510 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.222983 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.250459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.277164 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.301508 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.331542 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.358801 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.383203 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.412465 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.442224 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.462417 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.487723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.523781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25
ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.552543 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.552681 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.552723 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.552764 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.552918 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:21Z","lastTransitionTime":"2026-03-12T13:33:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.557453 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.582415 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.583647 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.590089 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.590191 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.590228 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.590270 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.590309 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:21Z","lastTransitionTime":"2026-03-12T13:33:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.614529 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.620741 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.623547 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.623646 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.623677 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.623720 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.623764 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:21Z","lastTransitionTime":"2026-03-12T13:33:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.650543 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.655393 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.658640 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.658749 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.658779 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.658866 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.658917 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:21Z","lastTransitionTime":"2026-03-12T13:33:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.685943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.687926 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.695164 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.695344 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.695383 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.695425 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.695470 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:21Z","lastTransitionTime":"2026-03-12T13:33:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.710796 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.722136 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has
expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: E0312 13:33:21.722211 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.743582 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\"
:true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from 
github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.769227 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.799093 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.826537 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.848133 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/4.log" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.856703 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.887583 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.891244 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:21 crc 
kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.891391 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.922090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server 
(\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind *v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.959889 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0
dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.981798 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:21 crc kubenswrapper[4125]: I0312 13:33:21.999716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:21Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.025358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.025670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.026318 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.026367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.026321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.026482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.026520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.026486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.026715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.026995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.027210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.027323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.027373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.027449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.027578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.027870 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.027913 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.028180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.028236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.028412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.028420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.028568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.028651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.028773 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.028980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.028991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.029144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.029271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.029352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:22 crc kubenswrapper[4125]: E0312 13:33:22.029436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.032449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.059003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.077759 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.110318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.131622 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.155782 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.186919 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.210215 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc 
kubenswrapper[4125]: I0312 13:33:22.235566 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.259400 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.281395 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.306353 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.332532 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.355258 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.375314 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.400741 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.426381 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.447184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.470408 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.492109 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.522596 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.556058 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.577195 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.610624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.640436 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.682167 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.704627 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.723615 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"2026-03-12T13:32:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb\\\\n2026-03-12T13:32:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb to /host/opt/cni/bin/\\\\n2026-03-12T13:32:35Z [verbose] multus-daemon started\\\\n2026-03-12T13:32:35Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:33:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.740250 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.755126 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.768357 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.783067 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.798418 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.813874 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.834637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.852345 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.872104 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.885482 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.885578 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.894994 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.925701 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.951080 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.969791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:22 crc kubenswrapper[4125]: I0312 13:33:22.990085 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:22Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.006781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.025895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.026156 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026181 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.026320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.025895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.026464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026496 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026564 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.026705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026869 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.026928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.026972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.026942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027123 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027191 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027286 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027549 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027698 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.027908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.027977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.028768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.028923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.029148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029566 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.029913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.030001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.025924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:23 crc kubenswrapper[4125]: E0312 13:33:23.030220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.033642 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\
\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.057223 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true
,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.076598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.094768 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.113053 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.129160 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.152794 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.172894 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.200979 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.219975 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.238722 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.274524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.317395 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.370475 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed 
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.420792 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.445773 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.489123 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905
743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.518203 4125 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.552873 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.590980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.632761 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.671725 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.717719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.755108 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.797758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.843207 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.882422 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.886182 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:23 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:23 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.886347 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.912863 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 
12 13:33:23 crc kubenswrapper[4125]: I0312 13:33:23.956799 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.005077 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:23Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.026085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.026173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.026392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.026442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.026529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.026688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.026788 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.027285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.027329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.027801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.027902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.027703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.028238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.028477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028547 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.028603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.028734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.028901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.028922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.029139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.029315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.029491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.029675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.029967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.030164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.030713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.030232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.051275 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.087746 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.120574 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.151125 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: E0312 13:33:24.182770 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.198963 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.235399 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.271901 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.313368 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.351949 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.390151 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.437588 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.474641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.515057 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.552602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.602611 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.632727 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.672124 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.721677 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.766421 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdd6045e081f1be72ef54119dcabbc2638add059567b54032f3237b5b1e42a45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:31:52Z\\\",\\\"message\\\":\\\"2026-03-12T13:31:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df\\\\n2026-03-12T13:31:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_253518c1-9571-4b52-b141-fac54c4766df to /host/opt/cni/bin/\\\\n2026-03-12T13:31:07Z [verbose] multus-daemon started\\\\n2026-03-12T13:31:07Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:31:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:31:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"2026-03-12T13:32:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb\\\\n2026-03-12T13:32:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb to /host/opt/cni/bin/\\\\n2026-03-12T13:32:35Z [verbose] multus-daemon started\\\\n2026-03-12T13:32:35Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:33:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.802598 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.830801 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.873160 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:24Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.885887 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:24 crc kubenswrapper[4125]: I0312 13:33:24.886051 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.024716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.024779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.025097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.025244 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.025537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.025930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.026114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.026177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.026177 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.026249 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.026283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.026297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.026552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.026614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.026672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.026789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.027277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.027293 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.027242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.027386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.027462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.027527 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.027588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.027617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.027666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.027703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.028092 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.028326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.028492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.028689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.029054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.029202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.029342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.029428 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.029492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.029659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.029873 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030064 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030302 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.030968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.031059 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.031165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.031343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.031441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.031719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:25 crc kubenswrapper[4125]: E0312 13:33:25.031981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.888408 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:25 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:25 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:25 crc kubenswrapper[4125]: I0312 13:33:25.888590 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.026141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.026246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.026514 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.026177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.026964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027115 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.027094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027101 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.027406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.027608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.028349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.028652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.028983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.029134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.029410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.029544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.029965 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.030227 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.030401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.030638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.030730 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.031086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.031254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:26 crc kubenswrapper[4125]: E0312 13:33:26.031497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.889458 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:26 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:26 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:26 crc kubenswrapper[4125]: I0312 13:33:26.889633 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.025605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.026150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.026278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.026330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.026591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.026594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.026748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.026971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.027318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027503 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.027672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.027773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.028149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.028152 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.028374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.028631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.028732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.029072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.029107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.029207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.029366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.029435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.029529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.029621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.030084 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.030124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.030168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.030369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.030524 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.030678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.030693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.031257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.032387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.032403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.032579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.032676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.033016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.033496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.033575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.034116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.034230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.034456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.034646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.035477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.036126 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.036425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.036674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.038988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.040236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.041953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.042685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.043481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.044134 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:27 crc kubenswrapper[4125]: E0312 13:33:27.044606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.886664 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:27 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:27 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:27 crc kubenswrapper[4125]: I0312 13:33:27.886992 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.028250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.028565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.028999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.029266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.029510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.029684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.030179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.030360 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.030639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.031235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.031436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.031504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.031508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.031614 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.031669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.031713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.031976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.032152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032215 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.032490 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.032651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.032974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.032976 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.033209 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.033297 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.033439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:28 crc kubenswrapper[4125]: E0312 13:33:28.033617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.888470 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:28 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:28 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:28 crc kubenswrapper[4125]: I0312 13:33:28.888663 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.025951 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.026322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.025982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.026673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.027197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.027732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028021 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.028113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.028310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028378 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.028541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.028711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.028974 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.029007 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.029243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.029478 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.029652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029699 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.029748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.029978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.030112 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030191 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.030275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030403 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.030443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.030616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.030696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.030782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.030993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.031120 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.031233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.031379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.031567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.032145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.032287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.032530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.032650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.032791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.033454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.033493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.033607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.031804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.033773 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.034160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:29 crc kubenswrapper[4125]: E0312 13:33:29.185466 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.889386 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:29 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:29 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:29 crc kubenswrapper[4125]: I0312 13:33:29.890388 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025086 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.025533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.026355 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.026461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.025710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.026569 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025877 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025878 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025899 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.025914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.026130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.026181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.026243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.026758 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.026933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.026978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.027957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:30 crc kubenswrapper[4125]: E0312 13:33:30.028076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.888887 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:30 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:30 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:30 crc kubenswrapper[4125]: I0312 13:33:30.890567 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.025613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.025749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.025920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.025935 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.025982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.026106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026204 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.026433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026448 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.026521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.026635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.026295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.026795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026911 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.026976 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.027266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.027470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.027629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027890 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.027939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.027990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.028195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.028338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.028408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.028594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.028644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.028689 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.028764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.029655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.030008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.030707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.030849 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.030907 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.030978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.031010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.031079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031086 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.031295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031904 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.031976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.032306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.032525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:31 crc kubenswrapper[4125]: E0312 13:33:31.030724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.055760 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.085769 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}
]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.113577 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.132343 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.154423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.175790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.198558 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.220503 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.237549 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.261551 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.287069 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.306544 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.325968 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.346200 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.366280 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.392414 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.413012 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.426754 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.426916 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.426946 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.426978 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.426999 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.435415 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.455406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.478528 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.497571 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.530855 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.546470 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.578778 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"2026-03-12T13:32:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb\\\\n2026-03-12T13:32:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb to /host/opt/cni/bin/\\\\n2026-03-12T13:32:35Z [verbose] multus-daemon started\\\\n2026-03-12T13:32:35Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:33:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.610530 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.629090 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.655875 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.683450 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.697950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.730637 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.750190 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.766887 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.783323 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.807613 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.825758 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.841956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.865318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.884684 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.887601 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.887720 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.904794 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.925460 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.949902 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.964790 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.979083 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.996452 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c4
7358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:31Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.998362 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.998404 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.998419 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.998440 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:31 crc kubenswrapper[4125]: I0312 13:33:31.998458 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:31Z","lastTransitionTime":"2026-03-12T13:33:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.018124 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.021095 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81
266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.022965 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.023008 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.023053 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.023077 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.023100 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:32Z","lastTransitionTime":"2026-03-12T13:33:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.026490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.026729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.026764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.026943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.026956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.027093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027139 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.027202 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.027393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027612 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027883 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.027627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.028021 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.027787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.028255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.028151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.028194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.028595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.028637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.028772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.028695 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.029108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.029321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.029560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.029595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.029880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.029679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.045222 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.048268 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeByt
es\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\"
:498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.053137 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.053195 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.053215 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.053243 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.053269 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:32Z","lastTransitionTime":"2026-03-12T13:33:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.069699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.069699 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.072142 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.077910 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.078075 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.078174 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.078375 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.078493 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:32Z","lastTransitionTime":"2026-03-12T13:33:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
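Every rejected patch in this window dies on the same TLS handshake: the network-node-identity webhook on 127.0.0.1:9743 serves a certificate whose NotAfter (2024-12-26T00:46:02Z) is long past the node's clock (2026-03-12). A small diagnostic sketch of the same NotBefore/NotAfter comparison the x509 library performs; the skip-verify dial is purely for inspecting the served certificate and is an assumption for illustration, not how the kubelet connects:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"time"
    )

    func main() {
    	// Dial the webhook endpoint seen in the log; InsecureSkipVerify lets us
    	// read the served certificate even though it no longer verifies.
    	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
    	if err != nil {
    		panic(err)
    	}
    	defer conn.Close()

    	cert := conn.ConnectionState().PeerCertificates[0]
    	now := time.Now()
    	fmt.Printf("NotBefore=%s NotAfter=%s now=%s\n", cert.NotBefore, cert.NotAfter, now)
    	// This is the validity-window check behind the repeated
    	// "x509: certificate has expired or is not yet valid" failures above.
    	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
    		fmt.Println("x509: certificate has expired or is not yet valid")
    	}
    }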
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.090529 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.094650 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status […node status payload identical to the 13:33:32.072142 attempt above…] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.099175 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.099301 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.099324 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.099350 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.099380 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:32Z","lastTransitionTime":"2026-03-12T13:33:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
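The NodeNotReady flapping is a second-order symptom: the container runtime reports NetworkReady=false because nothing has written a CNI config into /etc/kubernetes/cni/net.d/ (ovnkube-node cannot finish starting while its own status patches are rejected by the expired webhook certificate). A rough sketch of the directory scan behind the "No CNI configuration file" message, assuming the usual CNI config extensions (.conf, .conflist, .json) and the conf dir named in the log; the real check lives in the runtime's CNI plumbing, not in code like this:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // networkReady approximates the readiness probe: scan the CNI conf dir
    // and report false when no usable network config file is present.
    func networkReady(confDir string) (bool, error) {
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		return false, err
    	}
    	for _, e := range entries {
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json": // extensions commonly accepted for CNI configs
    			return true, nil
    		}
    	}
    	return false, nil // -> NetworkReady=false, "No CNI configuration file in ..."
    }

    func main() {
    	ok, err := networkReady("/etc/kubernetes/cni/net.d")
    	fmt.Println(ok, err)
    }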
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.113326 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: E0312 13:33:32.113390 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.133523 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.155772 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
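The 13:33:32.113390 entry closes one sync round: the kubelet makes a bounded number of node-status patch attempts per cycle (nodeStatusUpdateRetry, 5 in the upstream kubelet source) and then gives up until the next round, which is why the same retry/failure block repeats throughout this log. A schematic of that loop; tryUpdateNodeStatus here is a hypothetical stand-in for the real kubelet method, not its actual signature:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // nodeStatusUpdateRetry mirrors the upstream kubelet constant: how many
    // patch attempts are made per sync loop before giving up.
    const nodeStatusUpdateRetry = 5

    // tryUpdateNodeStatus is a hypothetical stand-in for the kubelet method
    // whose failures produce the "Error updating node status, will retry" lines.
    func tryUpdateNodeStatus(attempt int) error {
    	return errors.New("failed to patch status: webhook certificate has expired")
    }

    func updateNodeStatus() error {
    	for i := 0; i < nodeStatusUpdateRetry; i++ {
    		if err := tryUpdateNodeStatus(i); err != nil {
    			fmt.Printf("Error updating node status, will retry: %v\n", err)
    			continue
    		}
    		return nil
    	}
    	// Matches the E0312 13:33:32.113390 line above.
    	return errors.New("update node status exceeds retry count")
    }

    func main() {
    	fmt.Println(updateNodeStatus())
    }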
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.155772 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.178504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. 
The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.197072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.211894 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.226735 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.256457 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.277517 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.294996 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.316601 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.332011 4125 status_manager.go:877] "Failed to update status for pod"
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.352781 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.371336 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.384757 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.398997 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.448723 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.474984 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.506776 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.531454 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.549016 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.566873 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.582500 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.600775 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.624060 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.642499 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.660626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.675276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.693073 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.709171 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.729250 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.766181 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.783886 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.795459 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.811743 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.830916 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.849237 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.874126 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.886317 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:32 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.886409 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.893354 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.922990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de919
3fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:32 crc kubenswrapper[4125]: 
I0312 13:33:32.943285 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.959193 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:32 crc kubenswrapper[4125]: I0312 13:33:32.987089 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.001685 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:32Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.022213 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.024791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.024915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.024993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.025985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.024992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.024995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025099 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025112 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025289 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025383 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.025395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.026898 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.027927 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.028979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.029714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:33 crc kubenswrapper[4125]: E0312 13:33:33.031091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.047605 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.064569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.084370 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.106964 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.131716 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.153958 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.171535 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.188759 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.208584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.235893 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.260286 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.279422 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.307572 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.331668 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.369602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.407948 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.453464 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"2026-03-12T13:32:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb\\\\n2026-03-12T13:32:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb to /host/opt/cni/bin/\\\\n2026-03-12T13:32:35Z [verbose] multus-daemon started\\\\n2026-03-12T13:32:35Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:33:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.548553 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.579315 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.598694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.624310 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod 
\"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.656498 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.695524 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.730072 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.770490 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.819106 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.855265 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.886568 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:33 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.886754 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.892596 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.939504 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:33 crc kubenswrapper[4125]: I0312 13:33:33.971974 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:33Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.006978 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025364 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025369 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.025593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.025768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025873 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.025953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.025968 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.026075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.026136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.026254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.026414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.026556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.026629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.026723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.027100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.027321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.027574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.027783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.028265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.028584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.028693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.028917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.029241 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.029408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.029462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.046150 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.088135 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.128179 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.175641 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: E0312 13:33:34.187706 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.217372 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.263584 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.295992 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.344519 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.379694 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d46
93f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.413329 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:34Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.888222 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:34 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:34 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:34 crc kubenswrapper[4125]: I0312 13:33:34.888383 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.025646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.025932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026568 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.026616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.026927 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.026946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.026983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.027087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.027137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.027145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.027245 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.027314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.027169 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.027436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.027569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.027615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.027696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028343 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028442 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.028734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028878 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.028954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.029070 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.029103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.029138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.029485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.029874 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.029957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030209 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.030324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030524 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.030694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.030947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.031446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.032261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:35 crc kubenswrapper[4125]: E0312 13:33:35.033023 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.889803 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:35 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:35 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:35 crc kubenswrapper[4125]: I0312 13:33:35.890234 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.025343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.025543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.025627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.025712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.025734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.025962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.026221 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.026360 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.026509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.026549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.026719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.026773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.027004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.027106 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.027255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.027528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.027564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.027677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.027685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.027754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.027963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.028212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.028403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.028492 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.028541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.028591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.028667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.028971 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.029231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.029400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.029556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:36 crc kubenswrapper[4125]: E0312 13:33:36.029732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.887765 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:36 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:36 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:36 crc kubenswrapper[4125]: I0312 13:33:36.888103 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.024992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025684 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.025464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026646 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.027152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.027285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.027361 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.027504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.027666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.027760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.026616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.027766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.028967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.030149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.030572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.031357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.031490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.031568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.031560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.031771 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.031800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.031633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.031998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.032745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.033534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.034266 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.034481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.034624 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:37 crc kubenswrapper[4125]: E0312 13:33:37.035313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.887668 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:37 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:37 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:37 crc kubenswrapper[4125]: I0312 13:33:37.887774 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.024776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.026294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.025401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.026711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.027003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025737 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025768 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025920 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.027562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.025993 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.027694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.026028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.026150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.028028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.027312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.028368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.028727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.029226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.029544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.029978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.030338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.030457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.030599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:38 crc kubenswrapper[4125]: E0312 13:33:38.030745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.888509 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:38 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:38 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:38 crc kubenswrapper[4125]: I0312 13:33:38.888702 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.026225 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.026507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.027139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.027463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.027510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.027709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.028008 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.028022 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.028195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.028340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.028417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.028536 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.028986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.029318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.029760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.029894 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030085 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.030329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030349 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030549 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.030581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.030739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.030786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.031542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.034406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.034444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.034704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.034747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.035028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.035261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.035417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.035489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.035517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.035530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.035581 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.035640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.035658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.036148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.036593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.036696 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.037076 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.037196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.037246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.037195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.037434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.037596 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.037787 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.037986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.038175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.038347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.038491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.038616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.038766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.039125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.039252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.039401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.039534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.039663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.039791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:39 crc kubenswrapper[4125]: E0312 13:33:39.193460 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.227705 4125 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.227781 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.887112 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:39 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:39 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:39 crc kubenswrapper[4125]: I0312 13:33:39.887318 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.025510 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.025740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.025969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.026200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.025553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.026022 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.026477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.026533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.026569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.026665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.026741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.026755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.027154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.027289 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.027429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.027578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.027665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.027792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.028187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.028235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.028272 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.028493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.028585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.028765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.028795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.029114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.029331 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.029598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.029689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.029958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:40 crc kubenswrapper[4125]: E0312 13:33:40.030236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.888992 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:40 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:40 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:40 crc kubenswrapper[4125]: I0312 13:33:40.889239 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.025942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.026215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.026375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.026714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.027294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.027395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.027589 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.027626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.027801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.029147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.029237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.029306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.029395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.029451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.029462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.029737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.030376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.030594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.030751 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.031015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.031419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.031803 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.032557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.033788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035096 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035906 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.035998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.036999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.037124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.037216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.037251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.037800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.038402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.038940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.038979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.040397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.041430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.042137 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.042771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.043123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.043447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.043782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.044206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.044491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.044660 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.045100 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.045480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.045782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.046146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.046342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.047012 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.047597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.047991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.048347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.048653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.049516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.050305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:41 crc kubenswrapper[4125]: E0312 13:33:41.050645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.890934 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:41 crc kubenswrapper[4125]: I0312 13:33:41.891216 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.025192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.025910 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.026283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.026367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.026660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.026765 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027292 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.029185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.029342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.027784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.028008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.028479 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.028948 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.029587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.029778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.030081 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.030269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.030410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.030536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.030615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.030670 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.031015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.031252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.028947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.066553 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.100439 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.138360 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.179496 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.213958 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.248480 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.265486 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.265732 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.265766 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.265803 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.265995 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:42Z","lastTransitionTime":"2026-03-12T13:33:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.292019 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.299122 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.308331 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.308743 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.308924 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.308977 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.309030 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:42Z","lastTransitionTime":"2026-03-12T13:33:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.330757 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.348254 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.348380 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.348417 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.348454 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.348502 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:42Z","lastTransitionTime":"2026-03-12T13:33:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.360412 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.372460 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.372518 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.372538 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.372562 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.372586 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:42Z","lastTransitionTime":"2026-03-12T13:33:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.383084 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"2026-03-12T13:32:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb\\\\n2026-03-12T13:32:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb to /host/opt/cni/bin/\\\\n2026-03-12T13:32:35Z [verbose] multus-daemon started\\\\n2026-03-12T13:32:35Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:33:20Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.397092 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.404113 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.404323 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.404367 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.404407 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.404460 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:42Z","lastTransitionTime":"2026-03-12T13:33:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.408925 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.425768 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: E0312 13:33:42.425914 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.429730 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.451274 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.470356 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.490687 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.512596 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.537700 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.577461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.612065 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.637066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.654066 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.675125 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.694385 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.718519 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.737113 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.755551 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.771351 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.796626 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.814491 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.834616 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.850932 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.866493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580
f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.888324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.888693 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.891239 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and 
key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 
13:33:42.909326 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.925905 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.947085 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.971172 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:42 crc kubenswrapper[4125]: I0312 13:33:42.993120 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:42Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.016248 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025075 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025101 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025211 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025308 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025355 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025381 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025391 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025481 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.025547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.027603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.028230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.029242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.028506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.028652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.028781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.029107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.029265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.029296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.029409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.029804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.030019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.030265 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.030440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.030671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.030888 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.031105 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.031285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.031488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.031724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.031886 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.032103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.032121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.032334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.032353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.032508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.032720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.033375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.033438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.033489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.033597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.033802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:33:43 crc kubenswrapper[4125]: E0312 13:33:43.034123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.040647 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.065629 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"starte
dAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed 
*v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.087133 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.107344 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.126942 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.144259 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.164420 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.195346 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.219909 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.251476 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.280757 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.299633 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.327189 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.352801 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.374670 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.406719 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.435602 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.473607 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.513493 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.542990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.612991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.643639 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.683507 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.710710 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.745003 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.769174 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.791389 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.813950 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:43Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.889009 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:43 crc kubenswrapper[4125]: I0312 13:33:43.889250 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.025342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.025415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.025528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.025630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.025924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.025926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.026235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.026311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.026333 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.026491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.026607 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.026659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.027322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.027509 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.027625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.027732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.027793 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.028110 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.028227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.028357 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.028415 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.028578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.028731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.029009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.029234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.029370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.029560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.030029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.030717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.031134 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.031483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:44 crc kubenswrapper[4125]: E0312 13:33:44.195092 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.887478 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:44 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:44 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:44 crc kubenswrapper[4125]: I0312 13:33:44.887671 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.025733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.026349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.026505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.026713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.026960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.027298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.027429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.027630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.027749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.028426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.028557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.030213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.030601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.030748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.031210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.031685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.032032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.032341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.032654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.032788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.033256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.033447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.033575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.033756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.034139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.034139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.034251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.034321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.034409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.034606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.035338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.035547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.035565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.035916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.036227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.036343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.036543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.036642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.036686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.036919 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.037361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.037416 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.037564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.037753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.037994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.038111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.038279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.038351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.038270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.038574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.038474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.038801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.038997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.039139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.039256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.039354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.039467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.039565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.039615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.039748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.040131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.040388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.040541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:45 crc kubenswrapper[4125]: E0312 13:33:45.040798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.887996 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:45 crc kubenswrapper[4125]: I0312 13:33:45.888349 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.025233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.025787 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.026331 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.026445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.026551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.026466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.026749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.027276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.027309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.027517 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.027673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.027962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.028243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.028616 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.028924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.028933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.029028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.029335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.029525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.029648 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.029919 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.030018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.030108 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.030261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.030416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.030465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.030556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.030630 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.030958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.031171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.031243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.031416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.031763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:46 crc kubenswrapper[4125]: E0312 13:33:46.031993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.889145 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:46 crc kubenswrapper[4125]: I0312 13:33:46.889261 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.025593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.025754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.026136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026232 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.026349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026411 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.026645 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.025651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.027176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.027125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.027268 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.026904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.028026 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.028223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.028337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.028230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.028475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.028683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.028928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.028978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.029104 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.029206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.029315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.029390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.028487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.028609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.029683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.030176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.030344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.030601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.030804 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.030994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.031106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.031124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.031288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.031393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.031407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.031604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.032107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.032350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.032961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.033548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.033776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.034127 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.034169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.034553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.034995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.035422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.035795 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.036123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.036153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.036308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.036433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.036614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.037104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:47 crc kubenswrapper[4125]: E0312 13:33:47.037287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.887589 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:47 crc kubenswrapper[4125]: I0312 13:33:47.887770 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.025987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026142 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026347 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.026370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.026554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026588 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.026914 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026922 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.027019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.027035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.026972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.027296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.027530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.027596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.027755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.028025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.028362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.028591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.028714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.029008 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.029134 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.029151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.029236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.029463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.029621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.029769 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:48 crc kubenswrapper[4125]: E0312 13:33:48.030115 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.887536 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:48 crc kubenswrapper[4125]: I0312 13:33:48.887717 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.025273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.025345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.025462 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.025563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.025612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.025625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.025722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.025762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.025970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.026079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.026160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.026172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.026742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.026954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.026968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.026974 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.027190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.027482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.027665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.027901 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.028089 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.028232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.028404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.028558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.028619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.028699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.028955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.029020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.029165 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.029317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.029380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.029459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.029572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.029618 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.029757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.029768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.030313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.030321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.030354 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.030384 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.030556 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.030614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.030677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.030791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.031121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.031300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.031348 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.031489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.031544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.031711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.032193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.032351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.032536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.032716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.033369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.033714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.033945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:49 crc kubenswrapper[4125]: E0312 13:33:49.201605 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.889357 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:49 crc kubenswrapper[4125]: I0312 13:33:49.889527 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.026386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.026551 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.026683 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.026751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.026946 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.027169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.027203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.027228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.027276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.027183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.027708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.028531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.028480 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.028680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.028882 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.028988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.029213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.029294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.029380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.029540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.029681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.029759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.029953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.030100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.030254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.030318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.030466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.030591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.030703 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.030755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.031574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.031744 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.609389 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.609657 4125 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.609760 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.609732761 +0000 UTC m=+922.933119110 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.816122 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.816378 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.816580 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.816485583 +0000 UTC m=+923.139871912 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.817587 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.817670 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.817743 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.817796 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.817973 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818155 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818216 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818266 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818318 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818455 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818608 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.818668 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.818746 4125 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.818775 4125 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.818941 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.81891714 +0000 UTC m=+923.142303479 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.819158 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819229 4125 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819159 4125 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819296 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.819278281 +0000 UTC m=+923.142664620 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819361 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.819332982 +0000 UTC m=+923.142719351 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819401 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819450 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.819436576 +0000 UTC m=+923.142822895 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.819497 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819546 4125 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.819569 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819603 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.81958691 +0000 UTC m=+923.142973129 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819647 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819696 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.819683144 +0000 UTC m=+923.143069383 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.819651 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819723 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.819804 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819918 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.819796236 +0000 UTC m=+923.143182485 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.819975 4125 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820012 4125 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820094 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820103 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820031024 +0000 UTC m=+923.143417253 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820181 4125 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820184 4125 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820188 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820165669 +0000 UTC m=+923.143551838 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820250 4125 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820266 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.82024532 +0000 UTC m=+923.143631629 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820299 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820284381 +0000 UTC m=+923.143670660 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820298 4125 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820332 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820317492 +0000 UTC m=+923.143703671 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"client-ca" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820347 4125 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820361 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820344154 +0000 UTC m=+923.143730373 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820389 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820376325 +0000 UTC m=+923.143762504 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820413 4125 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820420 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820405896 +0000 UTC m=+923.143792075 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820428 4125 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820451 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820437357 +0000 UTC m=+923.143823566 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.819981 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820479 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820466958 +0000 UTC m=+923.143853127 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.820528 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.820584 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820621 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.820645 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820648 4125 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820677 4125 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.820698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820731 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820716015 +0000 UTC m=+923.144102244 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820784 4125 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.820918 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.820947 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.820925353 +0000 UTC m=+923.144311682 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821003 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821113 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821133 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821169 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821192 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821272 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821313 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821337 4125 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821354 4125 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821410 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.821389808 +0000 UTC m=+923.144776117 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821340 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821430 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821459 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821477 4125 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821493 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821539 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.821522191 +0000 UTC m=+923.144908510 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821581 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821587 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821649 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.821630825 +0000 UTC m=+923.145017054 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821698 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821710 4125 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821765 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.821750868 +0000 UTC m=+923.145137187 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.821768 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.821975 4125 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.822093 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822107 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.822033018 +0000 UTC m=+923.145419347 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.822223 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822236 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822279 4125 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822303 4125 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822381 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.822364508 +0000 UTC m=+923.145750857 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822449 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.822562 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822616 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822671 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.822654536 +0000 UTC m=+923.146040845 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822691 4125 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822711 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.822693867 +0000 UTC m=+923.146080136 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822751 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.82273362 +0000 UTC m=+923.146119939 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822771 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822796 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.822781452 +0000 UTC m=+923.146167721 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822972 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.822927535 +0000 UTC m=+923.146313804 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823122 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823190 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.823173773 +0000 UTC m=+923.146560102 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823235 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.823215434 +0000 UTC m=+923.146601613 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823329 4125 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823381 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.823366839 +0000 UTC m=+923.146753078 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.822622 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823421 4125 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823501 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.823482933 +0000 UTC m=+923.146869252 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.822568 4125 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.823559 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.823545584 +0000 UTC m=+923.146931903 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.823605 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.823957 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.824022 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.824138 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824192 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.824280 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824284 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.824264345 +0000 UTC m=+923.147650674 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824363 4125 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824434 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.8244133 +0000 UTC m=+923.147799809 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-key" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824483 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.824543 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824555 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.824528765 +0000 UTC m=+923.147915004 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824621 4125 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824669 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.824655699 +0000 UTC m=+923.148042018 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.824713 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824782 4125 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.824929 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.825007 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.824984909 +0000 UTC m=+923.148371236 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.825233 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.825156 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.824713 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.825341 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.825325029 +0000 UTC m=+923.148711348 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.825488 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.825468154 +0000 UTC m=+923.148854323 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.825517 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.825504175 +0000 UTC m=+923.148890364 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.887346 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:50 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:50 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.887519 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.927561 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.927757 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.927904 4125 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.927949 4125 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.927972 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d7ntf for pod openshift-service-ca/service-ca-666f99b6f-vlbxv: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.928018 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928122 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.928089001 +0000 UTC m=+923.251475800 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-d7ntf" (UniqueName: "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928230 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928248 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928311 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.928286717 +0000 UTC m=+923.251672966 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928371 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.928343329 +0000 UTC m=+923.251729638 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.928269 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.928558 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928468 4125 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.928623 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928689 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.928665909 +0000 UTC m=+923.252052218 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928780 4125 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928975 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.928999 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.928971488 +0000 UTC m=+923.252357727 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.929144 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.929030271 +0000 UTC m=+923.252416520 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"client-ca" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.929401 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.929504 4125 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.929570 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.929574 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.929556606 +0000 UTC m=+923.252942835 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"encryption-config-1" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930086 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930109 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930175 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930205 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930260 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930281 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.930244819 +0000 UTC m=+923.253631058 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930291 4125 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930307 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930342 4125 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930353 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.93033892 +0000 UTC m=+923.253724859 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930262 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930404 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.930386813 +0000 UTC m=+923.253773112 (durationBeforeRetry 2m2s).
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930402 4125 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930449 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930487 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.930465655 +0000 UTC m=+923.253851964 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-client" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930534 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930541 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930585 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.93057075 +0000 UTC m=+923.253956979 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930648 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930699 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930714 4125 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930754 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.930954 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930982 4125 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931033 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.931009602 +0000 UTC m=+923.254395491 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931107 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.931092365 +0000 UTC m=+923.254478244 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"service-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931113 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.931166 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931192 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.931169937 +0000 UTC m=+923.254556176 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931204 4125 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931236 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.931256 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931297 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.931278101 +0000 UTC m=+923.254664330 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931306 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931334 4125 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931347 4125 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931348 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.931361 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931393 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.931378523 +0000 UTC m=+923.254764382 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.930649 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931421 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.931404575 +0000 UTC m=+923.254790814 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.931763 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931475 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931914 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.931941 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.931945 4125 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.932011 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932029 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.932005855 +0000 UTC m=+923.255392084 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.932152 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932167 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932176 4125 projected.go:294] Couldn't get configMap openshift-kube-controller-manager/kube-root-ca.crt: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932367 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932399 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.932414 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932424 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932433 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager/revision-pruner-8-crc: object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932446 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.932473 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932192 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932492 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.932479137 +0000 UTC m=+923.255865026 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932504 4125 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932517 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access podName:72854c1e-5ae2-4ed6-9e50-ff3bccde2635 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.932507288 +0000 UTC m=+923.255893137 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access") pod "revision-pruner-8-crc" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635") : object "openshift-kube-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932594 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932653 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.932541309 +0000 UTC m=+923.255927638 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932686 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932187 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932721 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932741 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932747 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932767 4125 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932807 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.932788739 +0000 UTC m=+923.256174918 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.933206 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.933174591 +0000 UTC m=+923.256560780 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.933246 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.933231152 +0000 UTC m=+923.256617331 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.932627 4125 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.933297 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.933338 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.933315914 +0000 UTC m=+923.256702143 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.933577 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.933928 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.933968 4125 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.933987 4125 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934108 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.934031856 +0000 UTC m=+923.257418095 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.934139 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.934231 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.934305 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934319 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.934357 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934454 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934956 4125 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935027 4125 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934464 4125 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" 
not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935113 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935159 4125 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935160 4125 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935178 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935190 4125 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934468 4125 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935244 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.934953 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.934510 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935121 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.935106519 +0000 UTC m=+923.258492478 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935343 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.935321745 +0000 UTC m=+923.258708025 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935381 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.935361478 +0000 UTC m=+923.258747787 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935415 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.935400899 +0000 UTC m=+923.258787078 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935444 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.93543111 +0000 UTC m=+923.258817309 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.935491 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.935569 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935632 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935655 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.935622468 +0000 UTC m=+923.259008757 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935705 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.935686497 +0000 UTC m=+923.259072836 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"image-import-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935772 4125 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.936146 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.936125502 +0000 UTC m=+923.259511751 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.935921 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.936376 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.936361559 +0000 UTC m=+923.259747868 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"audit-1" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.936562 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.936780 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.937118 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.937216 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.937194824 +0000 UTC m=+923.260581123 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.937276 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.937431 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. 
No retries permitted until 2026-03-12 13:35:52.937410612 +0000 UTC m=+923.260796851 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.937472 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.937572 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.937626 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.937676 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.937685 4125 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.937726 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.937745 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.93772899 +0000 UTC m=+923.261115289 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.937797 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938368 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938446 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.938426904 +0000 UTC m=+923.261813233 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938516 4125 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938603 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.938583237 +0000 UTC m=+923.261969546 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-oauth-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938677 4125 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.938700 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.939096 4125 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938755 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.938736202 +0000 UTC m=+923.262122491 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.939265 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.938788 4125 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.939608 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle podName:378552fd-5e53-4882-87ff-95f3d9198861 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.939582428 +0000 UTC m=+923.262968917 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle") pod "service-ca-666f99b6f-vlbxv" (UID: "378552fd-5e53-4882-87ff-95f3d9198861") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.939639 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.939707 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.939754 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.939887 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940190 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940249 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940326 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940376 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940455 
4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.940435336 +0000 UTC m=+923.263821625 (durationBeforeRetry 2m2s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940518 4125 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940548 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940580 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.940562259 +0000 UTC m=+923.263948498 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940623 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940637 4125 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940687 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.940673203 +0000 UTC m=+923.264059432 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940718 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.940704014 +0000 UTC m=+923.264090253 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.940889 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.940920 4125 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941015 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.940993632 +0000 UTC m=+923.264379931 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941115 4125 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941177 4125 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941187 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.941170248 +0000 UTC m=+923.264556487 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941244 4125 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941270 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.9412526 +0000 UTC m=+923.264638769 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941298 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.941284243 +0000 UTC m=+923.264670502 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941345 4125 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941359 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941397 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.941381104 +0000 UTC m=+923.264767373 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941429 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.941414475 +0000 UTC m=+923.264800714 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941435 4125 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941488 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.941472768 +0000 UTC m=+923.264858997 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.941542 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941610 4125 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.941657 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941681 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.941665142 +0000 UTC m=+923.265051431 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.941733 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.941968 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941806 4125 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: I0312 13:33:50.942196 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942270 4125 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942338 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.942320774 +0000 UTC m=+923.265707014 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.941740 4125 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942380 4125 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942414 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. 
No retries permitted until 2026-03-12 13:35:52.942400897 +0000 UTC m=+923.265787106 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942445 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.942430758 +0000 UTC m=+923.265816937 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942481 4125 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942522 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942533 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.94251673 +0000 UTC m=+923.265903019 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942578 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.942558512 +0000 UTC m=+923.265944821 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:33:50 crc kubenswrapper[4125]: E0312 13:33:50.942609 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:52.942595673 +0000 UTC m=+923.265981862 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025566 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.025888 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025922 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025942 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.025984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026199 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.026160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026164 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.026367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.026577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.026629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.027114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.027285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.027474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027544 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.027574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.027703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.027676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.027940 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.028213 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.028262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.028438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.028688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.028933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.029009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.029175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.029334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.029374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.029554 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.029555 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.029736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.029932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.030222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.030315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.030502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.030614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.030735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.031561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.031781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.032097 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.043966 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044138 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044290 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044303 4125 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044394 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044405 4125 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044430 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r8qj9 for pod openshift-apiserver/apiserver-67cbf64bc9-mtx25: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044464 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044508 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9 podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.044483597 +0000 UTC m=+923.367869956 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-r8qj9" (UniqueName: "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044565 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044576 4125 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044581 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044624 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044628 4125 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044642 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. 
No retries permitted until 2026-03-12 13:35:53.044622882 +0000 UTC m=+923.368009181 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-serving-cert" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044654 4125 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044713 4125 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.044735 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044750 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.044720815 +0000 UTC m=+923.368107134 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044793 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.044775606 +0000 UTC m=+923.368161785 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"oauth-serving-cert" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044943 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044977 4125 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044997 4125 projected.go:200] Error preparing data for projected volume kube-api-access-pzb57 for pod openshift-controller-manager/controller-manager-6ff78978b4-q4vv8: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.045016 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045112 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57 podName:87df87f4-ba66-4137-8e41-1fa632ad4207 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.045086147 +0000 UTC m=+923.368472996 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pzb57" (UniqueName: "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57") pod "controller-manager-6ff78978b4-q4vv8" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045199 4125 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.045254 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.045315 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045391 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.045370275 +0000 UTC m=+923.368756575 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"trusted-ca" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.045448 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045453 4125 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045474 4125 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-585546dd8b-v5m4t: object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045534 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.045517799 +0000 UTC m=+923.368904118 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045584 4125 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045628 4125 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.044311 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045638 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets podName:c5bb4cdd-21b9-49ed-84ae-a405b60a0306 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.045623893 +0000 UTC m=+923.369010173 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045703 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.045930 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046002 4125 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046026 4125 projected.go:200] Error preparing data for projected volume kube-api-access-hpzhn for pod openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046086 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046137 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn podName:af6b67a3-a2bd-4051-9adc-c208a5a65d79 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.046117149 +0000 UTC m=+923.369503478 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hpzhn" (UniqueName: "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn") pod "route-controller-manager-5c4dbb8899-tchz5" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046178 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.04616341 +0000 UTC m=+923.369549589 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.045959 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046212 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.046190881 +0000 UTC m=+923.369577180 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : object "openshift-console"/"console-config" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046251 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046269 4125 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046321 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.046307095 +0000 UTC m=+923.369693324 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.046704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.046747 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.046718407 +0000 UTC m=+923.370104656 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.047132 4125 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.047215 4125 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.047240 4125 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.047648 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.047620176 +0000 UTC m=+923.371006435 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.048168 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.048507 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.048678 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.048783 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.049166 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.049279 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049278 4125 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049378 4125 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049412 4125 projected.go:294] Couldn't get configMap 
openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049436 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049456 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle podName:23eb88d6-6aea-4542-a2b9-8f3fd106b4ab nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049437622 +0000 UTC m=+923.372823931 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle") pod "apiserver-67cbf64bc9-mtx25" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049466 4125 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049486 4125 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049526 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049544 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049527185 +0000 UTC m=+923.372913424 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049594 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049575106 +0000 UTC m=+923.372961405 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049620 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049674 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049660061 +0000 UTC m=+923.373046280 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049682 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049705 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049723 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049761 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049773 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049758542 +0000 UTC m=+923.373144771 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049785 4125 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049438 4125 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049801 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049956 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049936247 +0000 UTC m=+923.373322526 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.049341 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.049995 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.049981648 +0000 UTC m=+923.373367897 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.050096 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.050228 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.050324 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050376 4125 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050428 4125 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050447 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.050429633 +0000 UTC m=+923.373815862 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050453 4125 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050481 4125 projected.go:200] Error preparing data for projected volume kube-api-access-lz9qh for pod openshift-console/console-84fccc7b6-mkncc: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050524 4125 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050547 4125 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050527 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh podName:b233d916-bfe3-4ae5-ae39-6b574d1aa05e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.050514006 +0000 UTC m=+923.373900235 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-lz9qh" (UniqueName: "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh") pod "console-84fccc7b6-mkncc" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050565 4125 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.050619 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.050602279 +0000 UTC m=+923.373988578 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.050746 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.050798 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.051029 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051208 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051266 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051284 4125 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051299 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051305 4125 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.051209 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051330 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object 
"openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051333 4125 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051350 4125 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.051386 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051403 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.051384552 +0000 UTC m=+923.374770881 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051441 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.051425594 +0000 UTC m=+923.374811773 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051466 4125 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051523 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.051506536 +0000 UTC m=+923.374892825 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"audit" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051311 4125 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051650 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051661 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.051629142 +0000 UTC m=+923.375015591 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051674 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051098 4125 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051698 4125 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.051569 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051751 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.051734223 +0000 UTC m=+923.375120432 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.051781 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.051767964 +0000 UTC m=+923.375154213 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.155235 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.155444 4125 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.155629 4125 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.155667 4125 projected.go:200] Error preparing data for projected volume kube-api-access-w4r68 for pod openshift-authentication/oauth-openshift-765b47f944-n2lhl: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.155764 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68 podName:13ad7555-5f28-4555-a563-892713a8433a nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.155738165 +0000 UTC m=+923.479124484 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-w4r68" (UniqueName: "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68") pod "oauth-openshift-765b47f944-n2lhl" (UID: "13ad7555-5f28-4555-a563-892713a8433a") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.190976 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.191346 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.191750 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.191907 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.191938 4125 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.192113 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.192030144 +0000 UTC m=+923.515416483 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.193955 4125 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.194345 4125 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.194543 4125 projected.go:200] Error preparing data for projected volume kube-api-access-r7dbp for pod openshift-marketplace/redhat-marketplace-rmwfn: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: E0312 13:33:51.194978 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp podName:9ad279b4-d9dc-42a8-a1c8-a002bd063482 nodeName:}" failed. No retries permitted until 2026-03-12 13:35:53.194939083 +0000 UTC m=+923.518325462 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-r7dbp" (UniqueName: "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp") pod "redhat-marketplace-rmwfn" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.886586 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:51 crc kubenswrapper[4125]: I0312 13:33:51.886756 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.025353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.025741 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.026516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.026591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.026676 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.026799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.026942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.027034 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.027235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.027331 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.027449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.027476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.026541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.027571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.027687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.027947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.028127 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.028729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.029180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.029254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.029393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.029553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.029767 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.030094 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.030312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.030495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.031119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.031271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.031529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.031628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.031745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.032154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.056935 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [control-plane-machine-set-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"control-plane-machine-set-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"control-plane-machine-set-operator-649bd778b4-tt5tw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.086175 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0b6248038ea0c6b6feddfb0891e5951b6a0580fb2da9703c372d0b13ad469e6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:14Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.119426 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10603adc-d495-423c-9459-4caa405960bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns-operator 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns-operator\"/\"dns-operator-75f687757b-nz2xb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.158317 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297ab9b6-2186-4d5b-a952-2bfd59af63c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-controller-6df6df6b6b-58shh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.186406 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ad279b4-d9dc-42a8-a1c8-a002bd063482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [extract-utilities extract-content]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-utilities\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"extract-content\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-rmwfn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.220791 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d0dcce3-d96e-48cb-9b9f-362105911589\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0b51c5f98e7786850db9144e979bc0f5c7f0c87faef5c3cbdbbcc8e10e5ca36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:09Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zpnhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.249312 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.278485 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf1a8966-f594-490a-9fbb-eec5bafd13d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [migrator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:30f6d30b6bd801c455b91dc3c00333ffa9eec698082510d7abd3ad266d0de5a1\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"migrator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator\"/\"migrator-f7c6d88df-q2fnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.310339 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"af6b67a3-a2bd-4051-9adc-c208a5a65d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-5c4dbb8899-tchz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.342956 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.368612 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.398982 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.435764 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:33:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:30:32Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:33:18Z\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.459230 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.482184 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.494252 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.494348 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.494375 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.494401 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.494438 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:52Z","lastTransitionTime":"2026-03-12T13:33:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.514624 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.521404 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.521494 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.521516 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.521542 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.521569 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:52Z","lastTransitionTime":"2026-03-12T13:33:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.525197 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"09143b32-bfcb-4682-a82f-e0bfa420e445\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:32:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-03-12T13:20:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:42Z\\\",\\\"message\\\":\\\"W0312 13:26:41.620464 1 cmd.go:245] Using insecure, self-signed certificates\\\\nI0312 13:26:41.621227 1 crypto.go:601] Generating new CA for check-endpoints-signer@1773322001 cert, and key in /tmp/serving-cert-789118117/serving-signer.crt, /tmp/serving-cert-789118117/serving-signer.key\\\\nI0312 13:26:42.175270 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:26:42.183077 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 2025-06-26T12:46:59Z\\\\nI0312 13:26:42.183250 1 builder.go:299] check-endpoints version 4.16.0-202406131906.p0.gd790493.assembly.stream.el9-d790493-d790493cfc43fd33450ca27633cbe37aa17427d2\\\\nI0312 13:26:42.184101 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-789118117/tls.crt::/tmp/serving-cert-789118117/tls.key\\\\\\\"\\\\nF0312 13:26:42.371530 1 cmd.go:170] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:42Z is after 
2025-06-26T12:46:59Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:26:41Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:31:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.550349 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089
fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0
f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd
1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.552315 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6d67253e-2acd-4bc1-8185-793587da4f17\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca-operator\"/\"service-ca-operator-546b4f8984-pwccz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.560000 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.560081 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.560107 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.560131 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.560161 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:52Z","lastTransitionTime":"2026-03-12T13:33:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.579430 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34a48baf-1bee-4921-8bb2-9b7320e76f79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-v54bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.584101 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{...}\" [node-status patch body elided: byte-for-byte identical to the 13:33:52.550349 attempt above] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.590649 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.590747 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.590770 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.590796 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.590897 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:52Z","lastTransitionTime":"2026-03-12T13:33:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.607188 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"378552fd-5e53-4882-87ff-95f3d9198861\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [service-ca-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"service-ca-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-service-ca\"/\"service-ca-666f99b6f-vlbxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.608574 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{...}\" [node-status patch body elided: byte-for-byte identical to the 13:33:52.550349 attempt above] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.614016 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.614151 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.614178 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.614205 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady"
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.614238 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:33:52Z","lastTransitionTime":"2026-03-12T13:33:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.625943 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6323995442a1c180082671979c67824f80788672ca09fb42a47b3ee4b8f3c23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-approver-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-cluster-machine-approver\"/\"machine-approver-7874c8775-kh4j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z"
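Every failure captured above shares a single root cause: the serving certificate that the network-node-identity webhook presents on 127.0.0.1:9743 expired on 2024-12-26T00:46:02Z, long before the node clock in these entries (2026-03-12T13:33:52Z), so every pod and node status patch that must pass through the webhook fails TLS verification. A minimal sketch for confirming the expiry directly, assuming a Python 3 environment with the third-party cryptography package can reach the endpoint (host and port are taken from the Post URLs logged above):

```python
import socket
import ssl

from cryptography import x509

# Endpoint taken from the webhook Post URLs in the log entries above.
HOST, PORT = "127.0.0.1", 9743

# Verification must be disabled: the presented certificate is expired
# and would fail validation, which is exactly what we want to inspect.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

with socket.create_connection((HOST, PORT), timeout=5) as sock:
    with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
        # DER bytes of the peer certificate are available even when
        # verification is disabled.
        der = tls.getpeercert(binary_form=True)

cert = x509.load_der_x509_certificate(der)
print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before)
print("notAfter: ", cert.not_valid_after)
# Against the kubelet's clock above (2026-03-12T13:33:52Z), a notAfter
# of 2024-12-26T00:46:02Z matches the x509 errors in this log exactly.
```

Until that certificate is rotated, the retried node-status patches and the per-pod status patches recorded here will keep failing with the same x509 error, as the final retry below shows.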
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.632144 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"ephemeral-storage\\\":\\\"76397865653\\\",\\\"memory\\\":\\\"13782392Ki\\\"},\\\"capacity\\\":{\\\"ephemeral-storage\\\":\\\"83295212Ki\\\",\\\"memory\\\":\\\"14243192Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\"],\\\"sizeBytes\\\":2572133253},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:1174d995af37ff8e5d8173276afecf16ec20e594d074ccd21d1d944b5bdbba05\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4d4d0edd652ff5b78c2704a4f537be106c9234d6cbd951ae2a461194fb88b1c6\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"],\\\"sizeBytes\\\":2121001615},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:6a58359b0d36a5a73982ca12769ac45681fbe70b0cdd8d5aed90eb425dfe3b2b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:bc4ee69425a59a9d92c27ee511fc281057ed7bff497c2a4fc2d9935e6c367fe3\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1374511543},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\"],\\\"sizeBytes\\\":1346691049},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\"],\\\"sizeBytes\\\":1222078702},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:3dc5bbedad8cec4f9184d1405a7c54e649fce3ec681bbab1d2f948a5bf36c44f\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:4bff896b071099ebb4f6a059f5c542cb373ac8575e17309af9fc9cf349956aa1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"],\\\"sizeBytes\\\":1116811194},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\"],\\\"sizeBytes\\\":1067242914},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:3507cb8b73aa1b88cf9d9e4033e915324d7db7e67547a9ac22e547de8611793f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:c0bbc686ed725ea089fb6686df8a6a119d6a9f006dc50b06c04c9bb0ccf6512d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"],\\\"sizeBytes\\\":993487271},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\"],\\\"sizeBytes\\\":874809222},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\"],\\\"sizeBytes\\\":829474731},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\"],\\\"sizeBytes\\\":826261505},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\"],\\\"sizeBytes\\\":823328808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0aa8e5d7a7b4c6e7089fee8c2fcfd4ac66dd47b074701824b69319cfae8435e2\\\"],\\\"sizeBytes\\\":775169417},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\"],\\\"sizeBytes\\\":685289316},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\"],\\\"sizeBytes\\\":677900529},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\"],\\\"sizeBytes\\\":654603911},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\"],\\\"sizeBytes\\\":596693555},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\"],\\\"sizeBytes\\\":568208801},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\"],\\\"sizeBytes\\\":562097717},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\"],\\\"sizeBytes\\\":541135334},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\"],\\\"sizeBytes\\\":539461335},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:2e2e16ed863d85008fdd9d23e601f620ec149ea4f1d79bc44449ba7a8ad6d2b8\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:79fb5507de16adabfad5cf6bc9c06004a0eebe779bf438ef3a101735d2c205c9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":520763795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\"],\\\"sizeBytes\\\":507363664},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\"],\\\"sizeBytes\\\":503433479},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\"],\\\"sizeBytes\\\":503286020},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\"],\\\"sizeBytes\\\":502054492},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\"],\\\"sizeBytes\\\":501535327},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\"],\\\"sizeBytes\\\":501474997},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\"],\\\"sizeBytes\\\":499981426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\"],\\\"sizeBytes\\\":498615097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\"],\\\"sizeBytes\\\":498403671},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\"],\\\"sizeBytes\\\":497554071},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\"],\\\"sizeBytes\\\":497168817},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\"],\\\"sizeBytes\\\":497128745},{\\\"names\\\":[
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\"],\\\"sizeBytes\\\":496236158},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:24216f0c25a6e1d33af5f8798e7066a97c6c468ad09b8fad7342ee280db29d9d\\\"],\\\"sizeBytes\\\":495929820},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\"],\\\"sizeBytes\\\":494198000},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\"],\\\"sizeBytes\\\":493495521},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\"],\\\"sizeBytes\\\":492229908},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\"],\\\"sizeBytes\\\":488729683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\"],\\\"sizeBytes\\\":487322445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\"],\\\"sizeBytes\\\":484252300},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\"],\\\"sizeBytes\\\":482197034},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\"],\\\"sizeBytes\\\":481069430},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:d4ae187242ec50188e765b3cad94c07706548600d888059acf9f18cc4e996dc6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:f8b01d4bc2db4bf093788f2a7711037014338ce6e3f243036fe9c08dade252d6\\\",\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\"],\\\"sizeBytes\\\":476206289},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:450797700afd562ba3f68a8c07b723b5d2fec47f48d20907d60b567aca8b802f\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:e79d574eda09fd6b39c17759605e5ea1e577b8008347c7824ec7a47fd1f8f815\\\",\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\"],\\\"sizeBytes\\\":473948807},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9941e996bbf90d104eb2cad98bdaed8353e6c83a4ac1c34e9c65e6b1ac40fcc3\\\"],\\\"sizeBytes\\\":469995872},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4006587f6315522f104e61b48def4e51bacb5af9088fb533e3cbce958a7a26a2\\\"],\\\"sizeBytes\\\":469119456},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e95a421ea1d60cbffa4781a464aee3e316ed5550dd6c294388a2166b7737ad2\\\"],\\\"sizeBytes\\\":466544831},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\"],\\\"sizeBytes\\\":464091925}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3e215c51-46d2-4fe1-830f-0da70663f06f\\\",\\\"systemUUID\\\":\\\"eae6101c-2eb4-4be3-9ebf-3aa1ff418173\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: E0312 13:33:52.632414 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.648342 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ad7555-5f28-4555-a563-892713a8433a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-openshift]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:57f136230f9e7a63c993c9a5ee689c6fc3fc2c74c31de42ea51b0680765693f0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-openshift\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication\"/\"oauth-openshift-765b47f944-n2lhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.661991 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df889508a338fa6b931af3205d80b78058de97e0bc1a48c142b6fccb82db3708\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:39Z\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.676937 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e53e26d-e94d-45dc-b706-677ed667c8ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:21:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:39Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod 
\"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.696296 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.712423 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.733448 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a5ae51d-d173-4531-8975-f164c975ce1f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [catalog-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"catalog-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"catalog-operator-857456c46-7f5wf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.748990 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-samples-operator cluster-samples-operator-watch]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bd9205b185124b3b67669bb3166734f9e22831957c457aa1083f4f2bc4750312\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-samples-operator-watch\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-cluster-samples-operator\"/\"cluster-samples-operator-bc474d5d6-wshwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.775367 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df02f99a-b4f8-4711-aedf-964dcb4d3400\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:29:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d1ce9342b0ceac619a262bd0894094be1e318f913e06ed5392b9e45dfc973791\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:26:20Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI0312 13:25:49.801733 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI0312 13:25:49.805881 1 observer_polling.go:159] Starting file observer\\\\nI0312 13:25:49.807044 1 builder.go:299] cluster-policy-controller version 4.16.0-202406131906.p0.geaea543.assembly.stream.el9-eaea543-eaea543f4c845a7b65705f12e162cc121bb12f88\\\\nI0312 13:25:49.808017 1 dynamic_serving_content.go:113] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nF0312 13:26:20.079735 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cluster-policy-controller-lock\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:26:19Z is after 2025-06-26T12:47:18Z\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:25:49Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":7,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:29:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.797449 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5bacb25d-97b6-4491-8fb4-99feae1d802a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [oauth-apiserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:26660173efd872a01c061efc0bd4a2b08beb4e5d63e3d7636ec35ddcf5d3c1fa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"oauth-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-oauth-apiserver\"/\"apiserver-69c565c9b6-vbdpd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.815929 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/community-operators-8jhz6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f4dca86-e6ee-4ec9-8324-86aff960225e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"community-operators-8jhz6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.846997 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e19f9e8-9a37-4ca8-9790-c219750ab482\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:48Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:32:10Z\\\",\\\"message\\\":\\\"2:10.751189 20206 handler.go:217] Removed *v1.Pod event handler 6\\\\nI0312 13:32:10.751198 20206 handler.go:203] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0312 13:32:10.751203 20206 handler.go:203] Sending *v1.Namespace event handler 1 for removal\\\\nI0312 13:32:10.751208 20206 handler.go:203] Sending *v1.Namespace event handler 5 for removal\\\\nI0312 13:32:10.751216 20206 handler.go:217] Removed *v1.Namespace event handler 5\\\\nI0312 13:32:10.751224 20206 handler.go:217] Removed *v1.Node event handler 2\\\\nI0312 13:32:10.751231 20206 handler.go:217] Removed *v1.Node event handler 7\\\\nI0312 13:32:10.751321 20206 network_attach_def_controller.go:166] Shutting down network-controller-manager NAD controller\\\\nI0312 13:32:10.752954 20206 handler.go:217] Removed *v1.EgressIP event handler 8\\\\nI0312 13:32:10.752968 20206 handler.go:217] Removed *v1.EgressFirewall event handler 9\\\\nI0312 13:32:10.753078 20206 handler.go:217] Removed *v1.NetworkPolicy event handler 4\\\\nI0312 13:32:10.753100 20206 handler.go:217] Removed *v1.Pod event handler 3\\\\nI0312 13:32:10.753106 20206 handler.go:217] Removed *v1.Namespace event handler 1\\\\nI0312 13:32:10.753194 20206 ovnkube.go:581] Stopped ovnkube\\\\nI0312 13:32:10.753193 20206 reflector.go:295] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attach\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 2m40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}},{\\\"containerID\\\":\\\"cri-o://5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:55Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-44qcg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.867599 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy package-server-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"package-server-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"package-server-manager-84d578d794-jw7r2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.885896 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.886341 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.890088 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
machine-api-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6a5a3a50ec641063c0e1f3fc43240ceca65b0ac8e04564a4f69a62288e1930b2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-api-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-api\"/\"machine-api-operator-788b7c6b6c-ctdmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.914030 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f394926-bdb9-425c-b36e-264d7fd34550\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:527c3ad8df5e881e720ffd8d0f498c3fbb7727c280c51655d6c83c747373c611\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.932647 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.946534 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://76136c8c3824ec32a4d6b8ee0453d624b8a48c6727936cf60cbca8fd819a9b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:42Z\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.966252 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d51f445-054a-4e4f-a67b-a828f5a32511\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ingress-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43853133e59a34528c9018270d1f3b7952c38126adc543ec1c49573ad8f92519\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2024-06-27T13:25:33Z\\\",\\\"message\\\":\\\"an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821312 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.RoleBinding ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821367 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Service ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821402 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Pod ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821484 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.DNSRecord ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821488 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Deployment ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\nW0627 13:25:33.821752 1 reflector.go:462] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105: watch of *v1.Role ended with: an error on the server (\\\\\\\"unable to decode an event from the watch stream: context canceled\\\\\\\") has prevented the request from succeeding\\\\n2024-06-27T13:25:33.824Z\\\\tERROR\\\\toperator.main\\\\tcobra/command.go:944\\\\terror starting\\\\t{\\\\\\\"error\\\\\\\": \\\\\\\"failed to wait for canary_controller caches to sync: timed out waiting for cache to be synced for Kind 
*v1.Route\\\\\\\"}\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2024-06-27T13:23:33Z\\\"}},\\\"name\\\":\\\"ingress-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":9,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-operator\"/\"ingress-operator-7d46d5bb6d-rrg6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:52 crc kubenswrapper[4125]: I0312 13:33:52.989569 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5947f21-291a-48d6-85be-6bc67d8adcb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:39Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59f477057e14688daf5f65f226097d3d0a8a0520c3aec172fa61237e36bb90d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://53089d2f61188d763712eb947d1c1f5a8411305ecb82a36c83e0a7cf31d0cdfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:42Z\\\"}}},{\
\\"containerID\\\":\\\"cri-o://fd32fbaa780f71cd2b0086703b5ee13983865e71d5061344413faadfa208819f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:43Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f9b529e4c5ef1905743bea7a0034a5fcd65a7d038ec98d59685bf619e2815048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:40Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://112cbe16bf477bfe2ff6c1ac86f06737f16245939043e1420776b9baf60a6ac9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b57b89f985227cce453a376eb11e34fee32b30057597b775aea7a49b1aa797a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5375febaac0dd91b58ec329a5668a28516737ace6ba3f474888f2d43328c9db3\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e87c10ff0104c6edbfbc216a516b37d10791fbe8632020ed338c6b5cbee7d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-0
3-12T13:20:38Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:52Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.008765 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.025119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.025123 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.025558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.026254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.026603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.026773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.027080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.028566 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.028765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.030178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.028795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.028888 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.028915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.028981 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029087 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029140 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029367 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.029622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.029794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.030087 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.030161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.030420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.030451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.030455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.030514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.030755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.030918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.031118 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.031408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.031498 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.031613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.031779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.032311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.032542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.032726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.033084 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.033510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.034447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.034590 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410cf605-1970-4691-9c95-53fdc123b1f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7bd3dc8917e12135c4ba643ca99e96f2a0066050e5c7f5d80381774c485d52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://cb2e49469b4cda57466c718ad9bad5799d948b36e7220c0492463eace06b5f51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}}}]}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-77c846df58-6l97b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 
crc kubenswrapper[4125]: E0312 13:33:53.034954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035099 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.035967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.036083 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.036391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.036490 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:53 crc kubenswrapper[4125]: E0312 13:33:53.036597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.055624 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.071461 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/node-resolver-dn27q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a23c0ee-5648-448c-b772-83dced2891ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b75258aa155b79099394b381f2ae46529bd330253a96bec62374c1e83f4552c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-dns\"/\"node-resolver-dn27q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.086542 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/console-84fccc7b6-mkncc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [console]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cdb162caf10c0d078bc6c1001f448c5d011a2c70bd2d30100bf6e3b5340e8cae\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"console\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"console-84fccc7b6-mkncc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.110964 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver openshift-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c48d0ab22815dfdb3e171ef3df637ba22947bd5d2ec5154fb7dfc4041c600f78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}},{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver\"/\"apiserver-67cbf64bc9-mtx25\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.130570 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [pruner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"pruner\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-03-12T13:27:38Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-8-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.152631 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:28:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4e72b719e7b324f1e3aa1f32e96c31a903df5fb27f8e057495208104426d379\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:28:07Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:476d419f3e57548b58f62712e3994b6e6d4a6ca45c5a462f71b7b8e5f137a208\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba46413468968bb5ff0aa4a24353378a3acddfdd8f7e1ba2fec7f75da8865d72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b752ed92e0354fc8b4316fc7fc482d136a19212ae4a886515db81e8f0b37648\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a18d4207a9c577a06ee3f984d0041f61e45a42d1efbcc73f613da27dae344b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:329299206a95c4bc22e9175de3c3dbedc8e44048aaa7d07e83eafb3e14a3a30f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://f30ddaf09d0dafc5b96c9adcc2b550de9193fdc441c68eabb54f700f4a87b91c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f410c53f8634b7827203f15862c05b6872e3d2e7ec59799b27a6b414943469d8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ae2aded66e719b77fa539b4eda8f1006014578bd12d97cdb0ffd7118c455a70\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:27:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bab04bf3dc4f2334e25b09bc247224d7267ab2796c68489eac7889c327430ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fbcb795269ddc7387faae477e57569282f87193d8f9c6130efffc8c7480dd73\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2325a610db9bf89780ad18c61d67f8eeff701e57a26da3afbb276b954e329ce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:28:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:28:06Z\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bzj2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.165678 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed024e5d-8fc2-4c22-803d-73f3c9795f19\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:2aa3d89686e4084a0c98a021b05c0ce9e83e25ececba894f79964c55d4693f69\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-78d54458c4-sc8h7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.177244 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fb762d1-812f-43f1-9eac-68034c1ecec7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9cebc34caf6d3183cec0cd961b9bad9da1770f9caf35a134a9b9cf785253ec59\\\",\\\"image\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"imageID\\\":\\\"quay.io/crcont/ocp-release@sha256:65efcc4be5509483168263ee09cbedf25ece6d8e13e302b01754aa6835d4ea69\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-version-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-cluster-version\"/\"cluster-version-operator-6d5d9649f6-x6d46\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.193176 4125 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.209129 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:36Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:20:32Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c08ac1eeef29e3bbbb3de7162c808764244ad79cd4009f58f7a2dd93fafe315\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:20:37Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed983c628768c5d8b0ee726af46f06cbffdd54a96b4220b01d34951f8705914\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:20:34Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-12T13:20:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.227980 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.247340 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.264276 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.286373 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [multus-admission-controller kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b589a20426aa14440a5e226ccd7f08c3efb23f45a2d687d71c9b399967adfa45\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"multus-admission-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"multus-admission-controller-6c7c885997-4hbbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.305567 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.323471 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"59748b9b-c309-4712-aa85-bb38d71c4915\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [conversion-webhook-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"conversion-webhook-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-conversion-webhook-595f9969b-l6z49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.344965 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"530553aa-0a1d-423e-8a22-f5eb4bdbb883\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-config-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2cc5ae1e097b03db862f962be571c386e3ec338e71a053a8dd844a93fb4c31dc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"openshift-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-config-operator\"/\"openshift-config-operator-77658b5b66-dq5sc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.362374 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc291782-27d2-4a74-af79-c7dcb31535d2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:41Z\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"network-operator-767c585db5-zd56b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.390692 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.416903 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0f40333-c860-4c04-8058-a0bf572dcf12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5c5478f8c-vqvt7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.437396 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87df87f4-ba66-4137-8e41-1fa632ad4207\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-6ff78978b4-q4vv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.462918 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120b38dc-8236-4fa6-a452-642b8ad738ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-operator kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-operator-76788bff89-wkjgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.511345 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/multus-q88th" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"475321a1-8b7e-4033-8f72-b05a8b377347\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:83a060571691f85f6019ba7983d8d2f41b1845e371316ab2d0016226a9f111ca\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:33:20Z\\\",\\\"message\\\":\\\"2026-03-12T13:32:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb\\\\n2026-03-12T13:32:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2433adb-d52e-4f56-b39d-4301131497eb to /host/opt/cni/bin/\\\\n2026-03-12T13:32:35Z [verbose] multus-daemon started\\\\n2026-03-12T13:32:35Z [verbose] Readiness Indicator file check\\\\n2026-03-12T13:33:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:32:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}}}]}}\" for pod \"openshift-multus\"/\"multus-q88th\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.552741 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.577130 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2c8f3dfdc7e1db1c3266d9c546d65ff56471d28625d3ef54136fd0fa04874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-12T13:27:43Z\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.599144 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[registry]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"image-registry-585546dd8b-v5m4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z" Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.620318 4125 status_manager.go:877] "Failed to update status for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-12T13:27:38Z\\\",\\\"message\\\":\\\"containers with unready status: [authentication-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1252c975e7e2b2f2f1e4a547ca59f1b5af16b1d6dc5b2aa2efdd99f9edc47a75\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"authentication-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-authentication-operator\"/\"authentication-operator-7cc7ff75d5-g9qv8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-03-12T13:33:53Z is after 2024-12-26T00:46:02Z"
Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.887953 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:53 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:53 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:53 crc kubenswrapper[4125]: I0312 13:33:53.888143 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.025187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.025363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.025294 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.025699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.025704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.025948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.026200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.026344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.026472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.026491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.026717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.026729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.026954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.027193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.027406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.027484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.027583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.027690 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.027967 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.028316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.028477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.028675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.028939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.029378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.029693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.030351 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.030496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.025305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.031984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.032401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.033285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:33:54 crc kubenswrapper[4125]: E0312 13:33:54.204256 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.887977 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:33:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:33:54 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:33:54 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:33:54 crc kubenswrapper[4125]: I0312 13:33:54.888459 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.025301 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.025432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.025972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.026107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.026436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.026903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.026980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.027235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.027478 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.027664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.027991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.027988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.028132 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.028182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.028197 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.028234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.028303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.028376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.028792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.029160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.029305 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.029431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.029585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.029689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.029722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.030006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.030252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.030390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.030558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.030761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.031028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.031320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.031561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.031747 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.031968 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.032145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.032475 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.032538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.032622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.032654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.032804 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.033148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.033274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.033398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.033962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.034335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.034625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:55 crc kubenswrapper[4125]: E0312 13:33:55.035217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.888320 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:55 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:55 crc kubenswrapper[4125]: I0312 13:33:55.888471 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025525 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.029472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025664 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.025961 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.026076 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.026126 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.026341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.026638 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.026752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.030262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.030503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.030701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.030999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031461 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031618 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.031771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.032037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.032268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:56 crc kubenswrapper[4125]: E0312 13:33:56.032452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.888446 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:56 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:56 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:56 crc kubenswrapper[4125]: I0312 13:33:56.888657 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026718 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.026903 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.026928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.027232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.027247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.027295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.027552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.027685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.027705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.027902 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.028122 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.028440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.028670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.029000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.029032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.029151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.029237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.029277 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.029463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.029498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.029608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.029708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.029750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.030028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.030165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.030269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.030366 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.030427 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.030457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.030606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.030640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.030741 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.030791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.030997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.031131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.031419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.031646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.031963 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.032011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.032212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.032295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.032328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.032512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.032698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.032739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.033103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.033194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.033309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.033521 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.033699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.034123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.034345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.034500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.034760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.034998 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.037696 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.038734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:57 crc kubenswrapper[4125]: E0312 13:33:57.039378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.887360 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:57 crc kubenswrapper[4125]: I0312 13:33:57.887525 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.025953 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.026272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.026431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.026493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.026575 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.026762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.026973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.027242 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.027413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.027614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.027661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.028130 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.028197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.028133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.028399 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.028457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.028547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.028655 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.028803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.029020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.029224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.029376 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.029620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.030023 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.030619 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.030647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.031031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.031565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.032243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.033271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.033477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:33:58 crc kubenswrapper[4125]: E0312 13:33:58.035981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.886469 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:58 crc kubenswrapper[4125]: I0312 13:33:58.886597 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026553 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025869 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025900 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025932 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026090 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026038 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026217 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026513 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.026598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.025639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.027540 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.028000 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.028525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.029896 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.029918 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.029957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.029979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.031916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030041 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030777 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030898 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.030960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.031941 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.032106 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.032342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.032501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.032707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.032733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.032868 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.034104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:33:59 crc kubenswrapper[4125]: E0312 13:33:59.209661 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.888242 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:33:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:33:59 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:33:59 crc kubenswrapper[4125]: healthz check failed Mar 12 13:33:59 crc kubenswrapper[4125]: I0312 13:33:59.888440 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.025678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.025791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.025995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.026347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026370 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.026551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.026720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.027223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.027263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.027399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.027509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.027551 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.027727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.027785 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.027788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.028107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.028354 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.028636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.029111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.029327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.029502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.029639 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.030921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.031601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.032644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.033007 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:00 crc kubenswrapper[4125]: E0312 13:34:00.033659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.887220 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:00 crc kubenswrapper[4125]: I0312 13:34:00.887394 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025530 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025688 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.026012 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026017 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026041 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.026321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.026512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.026766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.025576 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.026972 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.027013 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.027218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.027279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.027319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.027432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.027216 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.027572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.027666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.027961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.028020 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.028137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.027977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.028283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.028394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.028485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.028617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.028621 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.028959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.029002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.029030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.029175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.029245 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.029341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.029422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.029461 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.029429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.029952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.030229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.030356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.030487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.030654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.030724 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.030894 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.031743 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.032019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.032206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.032385 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:01 crc kubenswrapper[4125]: E0312 13:34:01.032558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.887240 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:01 crc kubenswrapper[4125]: I0312 13:34:01.887424 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.025916 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.026698 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.026766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.026707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.027182 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.027224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.027362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.027444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.027560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.029194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.029359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.029563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.029640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.029694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.030359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.030559 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.030626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.030747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.030894 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.030974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.031038 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.031224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.031228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.031330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.031542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.031710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.031690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.032370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.032617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.032756 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.031119 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:02 crc kubenswrapper[4125]: E0312 13:34:02.033116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.719019 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.719143 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.719170 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.719205 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.719242 4125 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-12T13:34:02Z","lastTransitionTime":"2026-03-12T13:34:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.886478 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:02 crc kubenswrapper[4125]: I0312 13:34:02.886567 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.024687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.024748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.024901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.024708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.024752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.024796 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.025021 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025150 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.025309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.025634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.025491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025795 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.025788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.025994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.026125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.026334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.026518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.026734 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.026928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027043 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027103 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.027305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.027329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.027450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.027545 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.027736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.027766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.027973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028254 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.028611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.028681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.028739 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.028909 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.029031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.029174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.029279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.029381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.029472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.029585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.030320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.031141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.031323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:03 crc kubenswrapper[4125]: E0312 13:34:03.031476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.368640 4125 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.389140 4125 reflector.go:351] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.889033 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:03 crc kubenswrapper[4125]: I0312 13:34:03.889214 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.025534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.025622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.025571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.025938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.025994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.026100 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.026165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.026263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.026282 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.026360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.026507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.026594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.026702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.025942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.026113 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.026957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.026958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.027039 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.027168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.027282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.027334 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.027421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.027642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.027897 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.028024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.028220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.028349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.028416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.028802 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.028904 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.028990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.029187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:04 crc kubenswrapper[4125]: E0312 13:34:04.211369 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.887912 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:04 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:04 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.889138 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.921783 4125 csr.go:261] certificate signing request csr-l64gz is approved, waiting to be issued
Mar 12 13:34:04 crc kubenswrapper[4125]: I0312 13:34:04.935699 4125 csr.go:257] certificate signing request csr-l64gz is issued
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.025397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.025658 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025772 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026037 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026091 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.026114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026179 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026215 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.025781 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.026390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.026546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.026647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.026729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.026863 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.026986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.027176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.027458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.027489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.027609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.027651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.027771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.027786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.028271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.028507 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028734 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028856 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.028907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.028944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.028978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.029178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.029291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.029352 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.029523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.029598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.033973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.034418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.034586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.034689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.034797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.035024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:05 crc kubenswrapper[4125]: E0312 13:34:05.035203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.886707 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:05 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:05 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.887022 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.937674 4125 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-06-01 13:56:56.023228977 +0000 UTC
Mar 12 13:34:05 crc kubenswrapper[4125]: I0312 13:34:05.937768 4125 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1944h22m50.085471496s for next certificate rotation
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.024965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025123 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.025612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.025959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.025973 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.026226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.027726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.026350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.026364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.026446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.026571 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.026613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.026739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.026766 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.026730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.027141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.027244 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.027340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.027430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.027427 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.029269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.029421 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.029497 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.029681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.029739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.029986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:34:06 crc kubenswrapper[4125]: E0312 13:34:06.030192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.888153 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:06 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:06 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.888290 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.939375 4125 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-06-08 13:31:29.320639633 +0000 UTC
Mar 12 13:34:06 crc kubenswrapper[4125]: I0312 13:34:06.939482 4125 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 2111h57m22.381168549s for next certificate rotation
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.025569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.025781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.026146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.026276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.026419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.026514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.026639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.026733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.026973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.027129 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.027324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.027439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.027626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.027727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.027955 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.028126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.032398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.032638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.032997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.033235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033363 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.033921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.033966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.034368 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.034410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.034547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.034622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.034716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.035018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.035190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.035320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.035377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.035455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.035568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.035635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.035737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.035791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.036024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.036148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.036290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.036342 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.036473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.036531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.036620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.036749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.036941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.037145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.037305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.037398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.037539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.037728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.037988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.038207 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.039178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.039435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.039713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.040142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:34:07 crc kubenswrapper[4125]: E0312 13:34:07.040503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.886781 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:07 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:07 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:07 crc kubenswrapper[4125]: I0312 13:34:07.887096 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.025715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.025946 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.026239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.026377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.026431 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.026246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.026388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.026602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.026617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.026260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.027015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.027275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027316 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027376 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.027548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.028296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.028589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.028766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.029011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.029587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.030126 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.030558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.030993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.032300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.032678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.033170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:34:08 crc kubenswrapper[4125]: E0312 13:34:08.033409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.887222 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:08 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:08 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:08 crc kubenswrapper[4125]: I0312 13:34:08.888616 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.025969 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.025964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.026441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.026035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.026572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.026721 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.026907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.026934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027095 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027119 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.027411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.027493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027640 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.027542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.028016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.028380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.028388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.028466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.028486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.028426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.028725 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.028794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.029122 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.029169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.029385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.029433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.029583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.029610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.029592 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.029585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.029771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.030196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.030206 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030306 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.030735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.030895 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.031136 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.031158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.031335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.031948 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032452 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.032939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.213205 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.228034 4125 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.228279 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.228365 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.231030 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.231643 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372" gracePeriod=600 Mar 12 13:34:09 crc kubenswrapper[4125]: E0312 13:34:09.609557 4125 controller.go:195] "Failed to update lease" err="Operation cannot be fulfilled on leases.coordination.k8s.io \"crc\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/leases/kube-node-lease/crc, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 
705b8cea-b0fa-4d4c-9420-d8b3e9b05fb1, UID in object meta: " Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.887114 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:09 crc kubenswrapper[4125]: I0312 13:34:09.887234 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.025684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.025777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.025943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.025979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.026187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026243 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026395 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.026405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.026508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.026641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.026754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.026912 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.027001 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.027127 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027178 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.027304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.027770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.028138 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.028287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.028149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.029324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.030543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:10 crc kubenswrapper[4125]: E0312 13:34:10.030880 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.125325 4125 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372" exitCode=0 Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.125621 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"26121f607136902a1a5d61e96d62e613e649ee5268913cbb7b2d5d8c87c9d372"} Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.125653 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"065c5996ca080a2d926edae33744982a89d75871aab173c7a5bd48cc89165efc"} Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.125678 4125 scope.go:117] "RemoveContainer" containerID="17a8388c963edf26598bea0d9b92d7294ca3c3338187d882d2bb2edf6d937fd3" Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.888899 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:10 crc kubenswrapper[4125]: I0312 13:34:10.889110 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.025530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.025678 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.025713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.026153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.026325 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.026414 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.026478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.026652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.026798 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027122 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027015 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027187 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.027234 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.027388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.027445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.027614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027776 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.027895 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.027916 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028041 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028162 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.028166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028102 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.028280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.028311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.028477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.028659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.028789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.028945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.029020 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.029092 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.029323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.029583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.029641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.029954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.030027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.030180 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.030259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.030287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.030588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.030699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.031494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.031687 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.031949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.032447 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.032468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.032706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.032751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.032787 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.033013 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.033179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.033222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.033790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.034024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347"
Mar 12 13:34:11 crc kubenswrapper[4125]: E0312 13:34:11.034200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.891421 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:11 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:11 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:11 crc kubenswrapper[4125]: I0312 13:34:11.891562 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.026113 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.026237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.026173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.026424 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.026462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.026473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.027684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.028530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.028976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.029538 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.029783 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.030340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.030628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.031109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.031409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.031537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.032376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.032613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.032784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.033623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.034253 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.035195 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.035456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:34:12 crc kubenswrapper[4125]: E0312 13:34:12.035658 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.887180 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:12 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:12 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:12 crc kubenswrapper[4125]: I0312 13:34:12.887310 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.027602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026500 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026567 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026923 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026913 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027003 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.026999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027038 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.029991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.030267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.030345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.030442 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.028001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.030754 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.028276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.028471 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.031028 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.031235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.028909 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.029170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.029375 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.029543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.029710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.027123 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.030558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.030654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.028646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.031392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.031504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.031753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.033206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.032993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.033317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:34:13 crc kubenswrapper[4125]: E0312 13:34:13.033346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.888718 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:13 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:13 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:13 crc kubenswrapper[4125]: I0312 13:34:13.888992 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.025443 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.025529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.025694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.026032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.026162 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.026247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.026409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.026485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.026563 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.026702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.026765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.026913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.027116 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.027180 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.027255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.025488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.027489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.027671 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.027924 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.028185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.028380 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.028546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.028685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.028786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.029002 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.029161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.029224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.029413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.029419 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.029478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.029538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.029650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:34:14 crc kubenswrapper[4125]: E0312 13:34:14.214985 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.887013 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:14 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:14 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:14 crc kubenswrapper[4125]: I0312 13:34:14.887770 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027599 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027733 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027779 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.027991 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028151 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028250 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028343 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028397 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028409 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.028633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.032920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.036930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.037550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.037731 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.037911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.037990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038272 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.038640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.039038 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.039356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.039653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.039755 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.040137 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.040435 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.040674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.040939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.041204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.041386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.041538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.041627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.041944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.042275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.042534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.042677 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.042803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:15 crc kubenswrapper[4125]: E0312 13:34:15.042983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.886796 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:15 crc kubenswrapper[4125]: I0312 13:34:15.887054 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.025548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.025731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.025805 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026911 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.026975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.027172 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.027324 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.027628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.027631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.027918 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.028171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.028259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.028523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.028635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.028766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.028988 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.029052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.029208 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.029322 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.029656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.029744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.029961 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.030123 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.030341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.030485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.030676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:16 crc kubenswrapper[4125]: E0312 13:34:16.032778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.886995 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:16 crc kubenswrapper[4125]: I0312 13:34:16.887555 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.025575 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.025633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.025677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.025799 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.026111 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026152 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026251 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026440 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.026702 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.026802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.027017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.027135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.027178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.027213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.027356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.027407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.027541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.027688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.027734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.027932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.028016 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.028184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.028341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.028458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.028597 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.028750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.028934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.029014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.029210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.029487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.029558 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.029766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.029920 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.029932 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.030179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.029944 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.029999 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.030314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.030375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.030460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.030584 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.030639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.030673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.030746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.030869 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.030960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.031006 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.031007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.031516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.031674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.031801 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.031919 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.032104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.032150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.032214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.032335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.032418 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:17 crc kubenswrapper[4125]: E0312 13:34:17.033104 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.887032 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:17 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:17 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:17 crc kubenswrapper[4125]: I0312 13:34:17.887217 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.025703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.026401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.027138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026436 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026528 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.027329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026584 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.026526 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.027587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.027665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.027793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.028222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.028404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.028554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.028709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.028979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.029225 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.029476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.029654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.029913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.030146 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:18 crc kubenswrapper[4125]: E0312 13:34:18.030295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.888158 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:18 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:18 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:18 crc kubenswrapper[4125]: I0312 13:34:18.888313 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.025686 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.025765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.025716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.026222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026268 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026328 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.026469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026489 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.026662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026677 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026782 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.026982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.026988 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.027032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.027210 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.027390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.027531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.027647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.027747 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.027979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.027980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.028022 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.028200 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.028266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.028520 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.028553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.028680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.028756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.028996 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.029211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.029310 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.029382 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.029484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.029646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.029670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.029797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.029950 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.030038 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.030194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.030494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.030643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.030726 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.030922 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.030986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.031154 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.031218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.031404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.031465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.031543 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.031672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.031776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032046 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.032986 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:19 crc kubenswrapper[4125]: E0312 13:34:19.216327 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.890201 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:19 crc kubenswrapper[4125]: I0312 13:34:19.890393 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.026652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.027287 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.027406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.027568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.027710 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.028018 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.028189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.028285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.028412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.028448 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.028451 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.028779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.028933 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.028986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.029057 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.029199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.029370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.029491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.029498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.029617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.029778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.029954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.030116 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.030291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.030650 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.031142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.031161 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.031423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.031518 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.031779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.032162 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:20 crc kubenswrapper[4125]: E0312 13:34:20.032394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.887708 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:20 crc kubenswrapper[4125]: I0312 13:34:20.887935 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026517 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026545 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026649 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.026699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.026506 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.027025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.027207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.027268 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.027311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.027433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.027440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.027535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.027601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.027625 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.027683 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.027780 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.028140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.028256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.028421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.028423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.028671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.028743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.028761 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.028959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.029181 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.029190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.029312 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.029407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.029494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.029643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.029725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.029966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.029984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.030130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.030271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.030279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.030395 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.030554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.030678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.030999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.031239 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.031411 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.031562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.031714 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.031748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.031957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.032148 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.032298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.032362 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.032449 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.032663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033131 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033739 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:21 crc kubenswrapper[4125]: E0312 13:34:21.033993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.887097 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:21 crc kubenswrapper[4125]: I0312 13:34:21.887214 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.025566 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026456 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.025908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.025945 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026016 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.026184 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.025740 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.028798 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.029243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.029444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.029516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.029723 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.029974 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.030305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.030781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.031193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.031257 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.031408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.031648 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.031790 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.031992 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.032168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.032359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.032574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.033235 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.033347 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.033454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:22 crc kubenswrapper[4125]: E0312 13:34:22.033643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.888280 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:22 crc kubenswrapper[4125]: I0312 13:34:22.888496 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025334 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.025343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.025664 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025693 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.025785 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.025915 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026125 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.026170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026236 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.026351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.026436 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.026478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.026574 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.026615 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.026970 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027040 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027601 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027620 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.027735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027944 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.027975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.028013 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.028019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.028133 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.028364 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.028685 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.028942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.028953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029218 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.029487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.030975 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:23 crc kubenswrapper[4125]: E0312 13:34:23.031116 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.888365 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:23 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:23 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:23 crc kubenswrapper[4125]: I0312 13:34:23.888566 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.025985 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.027620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026144 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026128 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026263 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026342 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026394 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026405 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026477 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.026735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.026908 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.028166 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.028281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.028484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.028673 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.028938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.029200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.029338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.029575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.029788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.030040 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.030342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.030420 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.030429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.030735 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.030961 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.031612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:34:24 crc kubenswrapper[4125]: E0312 13:34:24.218327 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.887444 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:24 crc kubenswrapper[4125]: I0312 13:34:24.887658 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.026715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026777 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.027137 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027235 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027418 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027566 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027570 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.027570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.027633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.026296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.028296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.028407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.028718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.028789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.029126 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.029337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.029543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.029359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.029480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.029488 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.029413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.029753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.030164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.030177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.030342 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.030196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.030430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.030582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.030719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.030626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.031159 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.031181 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.031376 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.031550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.031722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.031735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.031996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.032011 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.032270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.032453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.032587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.032914 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033328 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033107 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033581 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.033954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.034153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.034313 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.034567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.034957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.035617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.036203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:25 crc kubenswrapper[4125]: E0312 13:34:25.036485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.887292 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:25 crc kubenswrapper[4125]: I0312 13:34:25.887792 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.026197 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.026197 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.026276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.026425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.026019 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.026671 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.026936 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.027051 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.027110 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.027169 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.027053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.027295 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.027415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.027421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.027487 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.027583 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.027733 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.027924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.028175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.028676 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.028930 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.029044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.029284 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.029457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.029542 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.029704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.029947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.030186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.030409 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.030483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:26 crc kubenswrapper[4125]: E0312 13:34:26.030612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.891343 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:26 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:26 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:26 crc kubenswrapper[4125]: I0312 13:34:26.892932 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.024802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.024929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.026415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.027049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.027290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025200 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.027472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.027620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.027779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025340 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.028465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.028955 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.029145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.029343 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025365 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.029610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.030210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.030623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025419 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.031030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.031308 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025457 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.031496 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.031692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.032045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025500 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.032344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025556 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.032554 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.032715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.033190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025615 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025639 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025661 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.025445 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.033456 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.033641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.033788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.034279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.034515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.034951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.035315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.035414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.035656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:27 crc kubenswrapper[4125]: E0312 13:34:27.035771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.887738 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:27 crc kubenswrapper[4125]: I0312 13:34:27.887999 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.025433 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026121 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026207 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026283 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026223 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026209 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.026635 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026976 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.027168 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.027164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.027269 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.027287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.027507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.028049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.028463 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.028704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.028899 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.029238 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.029515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.029629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.026004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.029908 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.031349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.032352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.032614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:28 crc kubenswrapper[4125]: E0312 13:34:28.032689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.888352 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:28 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:28 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:28 crc kubenswrapper[4125]: I0312 13:34:28.888580 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.025173 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.025529 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.025595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.025645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.025530 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.026256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026423 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.026570 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026709 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.026748 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.026757 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.027223 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027359 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.027608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027720 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027741 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.028045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027762 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.028170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.027727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.028224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.028414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.028512 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.028722 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.029023 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.029293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.029636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.030037 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.030397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.030710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.030744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.031222 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.031255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.031437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.031622 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.031770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.032061 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.032303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.032338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.032406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.032565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.032595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.032962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.033150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.033449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.033679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.034149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.034220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.034756 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.035247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.035405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.036991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.037291 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.037328 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.037760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:29 crc kubenswrapper[4125]: E0312 13:34:29.221232 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.887798 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:29 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:29 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:29 crc kubenswrapper[4125]: I0312 13:34:29.888452 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.026174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.026287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.026459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.026608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.026608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.026768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.027114 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.027136 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.027171 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.027323 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.027474 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.027494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.027558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.027979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.028213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.028416 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.028496 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.028615 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.028782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.029125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.029339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.029467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.029638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:30 crc kubenswrapper[4125]: I0312 13:34:30.029714 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.030055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.031248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.031404 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.031716 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.032196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.032466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.033017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:30 crc kubenswrapper[4125]: E0312 13:34:30.033567 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.019296 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.019763 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.026324 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.026793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.028224 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027226 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.028562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027248 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.028980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027282 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027304 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027375 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027469 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027490 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027520 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027541 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027591 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027616 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027658 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027745 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027771 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027794 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027871 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027897 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027924 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027948 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.027973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.033477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.033637 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.034257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.034458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.034528 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.034598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.034936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035149 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035204 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035336 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035398 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035674 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035862 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.035954 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036114 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036256 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036405 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:31 crc kubenswrapper[4125]: E0312 13:34:31.036579 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.427864 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.427962 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.427983 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.428011 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.428042 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.886121 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:31 crc kubenswrapper[4125]: I0312 13:34:31.886233 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.024801 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.025033 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.025056 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.025178 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.027711 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.028229 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.027994 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.028047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.028147 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.028195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.029403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.029530 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.029669 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.029771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.030174 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.030272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.030335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.030436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.030466 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.030478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.030504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.030590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.030757 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.030920 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.031281 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.033144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.033541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.034005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.034423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.034932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.035226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:32 crc kubenswrapper[4125]: E0312 13:34:32.035614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.888626 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:32 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:32 crc kubenswrapper[4125]: I0312 13:34:32.888722 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.025645 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.025670 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.025934 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026031 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026247 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026314 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.026352 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026354 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026451 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.025959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.026707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026727 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026760 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.026944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.027276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.027285 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.027491 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.027531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.027550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.027560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.027778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.028117 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.028188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.028206 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.028228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.028306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.028374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.028478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.028497 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.028550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.028691 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.028504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.029170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.029329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.029337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.029426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.029462 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.029561 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.029782 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.029946 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.030455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.030468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.030597 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.030678 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.030764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.031153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.031230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.031390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.031475 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.031647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.031950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.032030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.032290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.032745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.033002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.033036 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.033369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.033407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.033455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.033626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.033957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:33 crc kubenswrapper[4125]: E0312 13:34:33.034193 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.887528 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:33 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:33 crc kubenswrapper[4125]: I0312 13:34:33.887728 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025583 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025105 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025166 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025254 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025277 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.025523 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.027486 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.027650 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.028025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.028341 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.028604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.028938 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.029262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.029499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.029740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.030187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.030438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.030715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.031164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.031425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.031661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.032057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:34 crc kubenswrapper[4125]: E0312 13:34:34.224382 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.887058 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:34 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:34 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:34 crc kubenswrapper[4125]: I0312 13:34:34.887273 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.025722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026029 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.026121 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026407 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.026487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026505 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026566 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026631 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026735 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026786 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.027141 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.026751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.027151 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.027309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.027346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.027353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.027431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.027467 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.027552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.027622 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.028042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.028385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.028421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.028393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.028547 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.028587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.028684 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.028742 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.028751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.028794 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.028759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.029203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.029237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.029335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.029408 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.029503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.029523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.029749 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.030247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.030345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.030513 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.030680 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.031047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.031054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.031424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.031712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.031991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.032149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.032276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.032184 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.032527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.032718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.032932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.033147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.033340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.033391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:35 crc kubenswrapper[4125]: E0312 13:34:35.033485 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.887237 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:35 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:35 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:35 crc kubenswrapper[4125]: I0312 13:34:35.887414 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.031800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.032348 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.032647 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.032997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.033413 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.033612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.034032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.034274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.034498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.034709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.035033 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.035315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.035605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.035933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.036228 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.036447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.036894 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.037199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.037426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.037629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.037941 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.038226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.038441 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.038662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.039061 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.039337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.039619 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.039945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.040269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.040477 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.044053 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:36 crc kubenswrapper[4125]: E0312 13:34:36.044340 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.889276 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:36 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:36 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:36 crc kubenswrapper[4125]: I0312 13:34:36.890988 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.026140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.026155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.026408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.026422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.026987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.027507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.028144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028496 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028212 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028308 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028450 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.029043 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029055 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.028692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029273 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.029309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029431 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029442 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.029501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.029659 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.029748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.030142 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.030458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.030546 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.030549 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.030725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.031210 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.031410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.031548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.031641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.031642 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.031753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.032319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.032459 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.031947 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.032069 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.032179 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.032275 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.032653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.032717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.033265 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.033720 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.033799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.034393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.034633 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.034729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.034932 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.035334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.035534 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.036024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.036251 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.036450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.035803 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.036963 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.036984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:37 crc kubenswrapper[4125]: E0312 13:34:37.037230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.890667 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:37 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:37 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:37 crc kubenswrapper[4125]: I0312 13:34:37.891024 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.025494 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.025613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.025882 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.026190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.027344 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.027990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.028311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.028183 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.028792 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.029228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.029371 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.029467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.028996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.028259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.030455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.028285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.028225 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.029032 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.029105 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.029137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.029171 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.029199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.030136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.030312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.032593 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.033056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.033407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.033568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.033697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.034274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.034519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.034656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:38 crc kubenswrapper[4125]: E0312 13:34:38.034799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.888407 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:38 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:38 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:38 crc kubenswrapper[4125]: I0312 13:34:38.888514 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.025756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.025937 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026130 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026153 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026174 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026307 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026186 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.025788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.025892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.026560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.026861 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.026936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.027019 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.027021 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.027257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.027318 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.027519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.027700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.027713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.027734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.027883 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.027939 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.027953 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.028063 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.028214 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.028346 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.028373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.028470 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.028534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.028550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.028554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.028585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.028713 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.028964 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.029196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.031903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.025960 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.032542 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.032937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.033036 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.033591 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.034304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.034802 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.035762 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.036050 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.040449 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.036738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.037806 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.038319 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.038713 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.039439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.040035 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.040155 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.040654 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.040778 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.041170 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.041374 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.041574 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:39 crc kubenswrapper[4125]: E0312 13:34:39.226548 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.889915 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:39 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:39 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:39 crc kubenswrapper[4125]: I0312 13:34:39.890046 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.025952 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026346 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026044 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026055 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.026555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.026578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.026800 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.027143 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.027467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.027994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.028234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.028336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.028531 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.028541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.028728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.028984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.029057 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.029216 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.029338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.029473 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.029609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.029736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.030001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.030233 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.030373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.030516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.030649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.031509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:40 crc kubenswrapper[4125]: E0312 13:34:40.033036 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.887971 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:40 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:40 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:40 crc kubenswrapper[4125]: I0312 13:34:40.888802 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.025147 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.025415 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.025662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.025744 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.025893 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026022 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.026141 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026203 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.026305 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.026466 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.026652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026967 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027160 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.027189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027176 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.026970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027426 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.027394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.027601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.027737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.027886 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.027983 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.028154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.028261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.028296 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.028400 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.028487 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.028598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.028710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.028783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.028977 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029161 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029300 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029302 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.029440 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.029608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.029698 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.029773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.029942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.030024 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030160 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030407 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030508 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.030666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030740 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.030931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.031044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.031201 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.031252 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.031381 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.031492 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:41 crc kubenswrapper[4125]: E0312 13:34:41.031599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.887395 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:41 crc kubenswrapper[4125]: I0312 13:34:41.888337 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.025509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.030599 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.030732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.030774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.031048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.031198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.031240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.031289 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.031348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.031432 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.031475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.031608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.031752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.031980 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.032264 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.032412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.032464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.032611 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.032756 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.032970 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.033370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.033442 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.033606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.033681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.033799 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.034218 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.034262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.036361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.036499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.037327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.037697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:42 crc kubenswrapper[4125]: E0312 13:34:42.038198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.887454 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:42 crc kubenswrapper[4125]: I0312 13:34:42.887627 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025048 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.025638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025254 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025761 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.025718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.026278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.026288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.026371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.026585 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.026623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.026729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.026775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.026786 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.027065 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.027320 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.027348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.027429 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.027465 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.027901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.027925 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.028030 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.028107 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.028401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.028930 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.029012 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.029208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.029359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.029486 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.029661 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.029793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.029933 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.029957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.029976 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.030052 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.030026 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.030155 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.030237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030356 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030437 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030495 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.030779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.031033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.031261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.031349 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.031438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.031507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.031931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.032185 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.032547 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.028984 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:43 crc kubenswrapper[4125]: E0312 13:34:43.034410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.902675 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:43 crc kubenswrapper[4125]: I0312 13:34:43.904315 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.025242 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.025428 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.025525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.025793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.025938 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.025246 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.026949 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027287 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.027630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.027973 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.028063 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.028124 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.028274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.028335 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.028503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.028689 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.028962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.029050 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.029361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.029518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.029647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.029796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.030582 4125 scope.go:117] "RemoveContainer" containerID="3794f8795e4cb93e694a011051801bb27ad38a82a961fc0f93365a2e7fa90dcb"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.031493 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.032229 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.032512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:34:44 crc kubenswrapper[4125]: E0312 13:34:44.229251 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.331223 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/4.log"
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.886235 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:44 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:44 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:44 crc kubenswrapper[4125]: I0312 13:34:44.886346 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.025789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.025966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.026074 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.025905 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.026298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.026454 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.026609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.026627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.026715 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.026898 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.026939 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.026989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027024 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.027109 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.027240 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.027350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027350 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.027517 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.027569 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.027601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.027786 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.028472 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.028638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.029033 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.029288 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.030137 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.030406 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.030515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.030650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.030738 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.030252 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.030290 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.030183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.031525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.031554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.031602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.031984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.032140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.032388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.032403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.032706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.032903 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.033057 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.033167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.033388 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.033540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.033643 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034386 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034530 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034652 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034707 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034776 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034919 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:34:45 crc kubenswrapper[4125]: E0312 13:34:45.034990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.361244 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/4.log"
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.361449 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"e9cd6258b77a2dbc61d3969ac23ebefd8874acec3f3c831956f9ded27ab218f5"}
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.888340 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:45 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:45 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:45 crc kubenswrapper[4125]: I0312 13:34:45.888548 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.025259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.025987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.026213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.026241 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.025463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.025580 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.025701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.027194 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.025783 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.027957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.028295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.028430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.028309 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.028360 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.028703 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029317 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029403 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029498 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029519 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029590 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.029611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.029759 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.030058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.030257 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.030444 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.030668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.030951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.031038 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.031307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.031602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:34:46 crc kubenswrapper[4125]: E0312 13:34:46.031999 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.888289 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:46 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:46 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:46 crc kubenswrapper[4125]: I0312 13:34:46.888647 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.025420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.025789 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.025927 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.026172 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.026203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.026392 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.026468 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.026417 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.026539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.025459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.026447 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.026694 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.026780 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.027219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.027221 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.027313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.027379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.027462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.027270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.027737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.027755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.027921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.028018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.028018 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.028353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.028503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.028362 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.028398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.028906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.029345 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.029460 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.029609 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.029617 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.029729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.030194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.030361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.030436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.030612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.031149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.031651 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.031686 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.031697 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.031255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.031324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.031430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.031337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.029345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.032430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.032768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.033196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.033481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.033602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.033907 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.034000 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.034150 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.034199 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.034303 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.034460 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.034766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.035164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.036157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.036367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.036590 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:34:47 crc kubenswrapper[4125]: E0312 13:34:47.037436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.888651 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:34:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:34:47 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:34:47 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:34:47 crc kubenswrapper[4125]: I0312 13:34:47.888998 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.025422 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.025578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.025617 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.025806 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.025995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.026069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.026327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.026375 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.026240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.026430 4125 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.026620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.026692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.026793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.026799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.026998 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.027208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.027312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.027326 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.027457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.027491 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.027600 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.027774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.028047 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.028329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.028455 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.028620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.029014 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.029361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.029606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.029953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.030278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:48 crc kubenswrapper[4125]: E0312 13:34:48.030505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.887418 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:48 crc kubenswrapper[4125]: I0312 13:34:48.887582 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.024750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.024978 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.024781 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.025764 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.025962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.027457 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026028 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026035 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026163 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026196 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026190 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026238 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026248 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026348 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026351 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026377 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026433 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026473 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.026505 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026565 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026610 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026683 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026706 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.026726 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.026775 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.028267 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.028540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.029030 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.029316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.029488 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.029693 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.029997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.030259 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.030413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.030575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.030724 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.030774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.031373 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.031623 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.032329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.032553 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.032603 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.032682 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.032979 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.033187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.033476 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.033997 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=ovnkube-controller pod=ovnkube-node-44qcg_openshift-ovn-kubernetes(3e19f9e8-9a37-4ca8-9790-c219750ab482)\"" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.034003 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.034263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.034494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.034797 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.035190 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.035307 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.035992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.036325 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:49 crc kubenswrapper[4125]: E0312 13:34:49.231743 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.887186 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:49 crc kubenswrapper[4125]: I0312 13:34:49.887380 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.025635 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.025704 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.025723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.025892 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.025979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.026124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.026136 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.025659 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.026284 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.026296 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.026459 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.026589 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.026662 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.026719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.027048 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.027131 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.027261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.027612 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.027710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.027763 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.027796 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.027942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.028045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.028139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.027776 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.028439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.028704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.028984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.029168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.029358 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.029605 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:50 crc kubenswrapper[4125]: E0312 13:34:50.029984 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.888296 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:50 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:50 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:50 crc kubenswrapper[4125]: I0312 13:34:50.889071 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.025753 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.025990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.026248 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026253 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.025759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026453 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026502 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026516 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026640 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026663 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.026458 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026754 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.026785 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.026665 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.027143 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.027339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.027398 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.027588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.027751 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.028069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.028140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.028201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.028258 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.028369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.028389 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.028146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.028656 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.028677 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.028993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.029040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029237 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029668 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.029906 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.029986 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.030205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.030529 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.030750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.031138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031189 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031315 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031344 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.031350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.031562 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.031763 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.031962 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.032056 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.032158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.032232 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.032330 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.032446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.032572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.032701 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.033176 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:51 crc kubenswrapper[4125]: E0312 13:34:51.033312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.888524 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:51 crc kubenswrapper[4125]: I0312 13:34:51.889175 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025165 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025493 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.025921 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.026554 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.027985 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.028588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.028705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.029231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.029269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.029450 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.029539 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.029624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.029078 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.030164 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.030228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.030311 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.030327 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.030402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.030512 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.030952 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.031467 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.031718 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.032025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.032417 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.032606 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.032784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.033058 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:52 crc kubenswrapper[4125]: E0312 13:34:52.033278 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.888875 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:52 crc kubenswrapper[4125]: I0312 13:34:52.888979 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.024909 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.025243 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.025535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.025624 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.025685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.025709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026264 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.026445 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026478 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026510 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.026196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.026709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026755 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026907 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.026788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.027139 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.027167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.027368 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.027390 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.027484 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.027437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.027462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.027708 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.028049 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.028182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.028376 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.028514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.028625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.028746 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.028883 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.029016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.029311 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.029419 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.029507 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.029605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.029651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.029719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.029876 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.029980 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.030124 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030194 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.030220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.030479 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030489 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030610 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.030634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.030673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.030750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030871 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.030953 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031027 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031189 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031261 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031327 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031383 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031455 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031525 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031587 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:53 crc kubenswrapper[4125]: E0312 13:34:53.031638 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.888591 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:53 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:53 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:53 crc kubenswrapper[4125]: I0312 13:34:53.888739 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025374 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026201 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026303 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026350 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026205 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025652 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025660 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025692 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.026697 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.025550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.026995 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.027270 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.027297 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.027511 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.027779 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.028187 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.028310 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.028366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.028564 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.028572 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.028732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.029145 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.029396 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.029640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.030068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.030298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.031157 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:54 crc kubenswrapper[4125]: E0312 13:34:54.234668 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.888491 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:54 crc kubenswrapper[4125]: I0312 13:34:54.888676 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025237 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025269 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025432 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.025576 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025582 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025706 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.025715 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025867 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.025950 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.025954 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026077 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026138 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026168 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026224 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026250 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026287 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026387 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026412 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026494 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026784 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026876 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.026979 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.026615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.027211 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027266 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027330 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.027464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.027575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027629 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027655 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027673 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027685 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.027725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.027789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.027934 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028011 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028312 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028709 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028736 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028796 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028921 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.028978 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029203 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029290 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029717 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029789 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:55 crc kubenswrapper[4125]: E0312 13:34:55.029936 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.888582 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:55 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:55 crc kubenswrapper[4125]: I0312 13:34:55.888755 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.026054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.026148 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.026498 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.026719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.026769 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.026982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.027004 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027155 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.026982 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.027337 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027439 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027480 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027579 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.027588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.027750 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.027994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.028043 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.028200 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.028391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.028399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.028501 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.028534 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.028640 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.028994 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.029293 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.029472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.029630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.029771 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.030070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:56 crc kubenswrapper[4125]: E0312 13:34:56.030318 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.887797 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:56 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:56 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:56 crc kubenswrapper[4125]: I0312 13:34:56.888695 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.025552 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.026047 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.026535 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.026753 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.027256 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.027371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.027403 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.027595 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.027607 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.027731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.028646 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.029470 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.029940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.031439 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.031732 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.032045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.033012 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.033437 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.033598 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.033959 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034255 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034319 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034404 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034413 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034463 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.034472 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034572 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.034651 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.035001 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.035212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.035298 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.035394 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.035429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.035548 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.035604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.035632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.035936 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.036064 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.036139 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.036247 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.036494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.036667 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.036971 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.037080 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.037199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.037560 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.038312 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.038453 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.038727 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.038751 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.038942 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.038982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039009 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039226 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039344 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039541 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039681 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.039915 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.040147 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.040280 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.040413 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.040620 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:57 crc kubenswrapper[4125]: E0312 13:34:57.040791 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.887071 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:57 crc kubenswrapper[4125]: I0312 13:34:57.887360 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.025756 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.026390 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.026712 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.027017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.027322 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.027483 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.027696 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.027724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.028006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028072 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.028510 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028679 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028680 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.028964 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.028990 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.029062 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.029205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.029261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.029361 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.029526 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.029657 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.029917 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.030138 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.030234 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.030426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.030582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.030712 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.031173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:34:58 crc kubenswrapper[4125]: E0312 13:34:58.031514 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.888727 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:58 crc kubenswrapper[4125]: I0312 13:34:58.889082 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025420 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025464 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025515 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025572 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.026186 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.026359 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025585 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025605 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.026616 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.026668 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.026992 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.027277 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.027281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.027345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025644 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025654 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.027500 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025759 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025793 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.027314 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.027611 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.027631 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.027682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.027690 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.027914 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.025628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.028165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.028292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.028313 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.028614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.028295 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.028863 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.028926 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.029032 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.029074 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.029183 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.028691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.029447 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.030024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.030400 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.030550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.030643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.031135 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.032316 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.032746 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.033410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.033632 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.033910 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.034231 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.034419 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.034465 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.035069 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.035334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.035446 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.035544 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.035759 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.036173 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.036604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:34:59 crc kubenswrapper[4125]: E0312 13:34:59.237037 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.889324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:34:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:34:59 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:34:59 crc kubenswrapper[4125]: healthz check failed Mar 12 13:34:59 crc kubenswrapper[4125]: I0312 13:34:59.889745 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.026175 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.026478 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.026586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.027029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.027402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.027694 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.027959 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.028275 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.028730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.028987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.028729 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.028943 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.029337 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.029672 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.029737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.030129 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.030250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.030502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.030555 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.029436 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.030188 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.031174 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.031321 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.031506 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.031691 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.030140 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.031683 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.032262 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.032309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.032604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.033073 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:35:00 crc kubenswrapper[4125]: E0312 13:35:00.033430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.888193 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:00 crc kubenswrapper[4125]: I0312 13:35:00.888448 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.025497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.025628 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.025564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.026081 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.026167 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.026385 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.026578 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.026592 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.026719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.026996 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027013 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027228 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.027397 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027410 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.027626 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027629 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027700 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.027990 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.027932 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.028246 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.028372 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.028475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.028598 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.028625 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.028913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.028955 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.029077 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.029433 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.029533 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.029682 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.029732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.029923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.030369 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.030646 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.031082 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.031208 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.031367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.031951 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.032213 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.032353 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.032392 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.032434 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.032601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.032523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.032749 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.032937 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.033007 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.033066 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.033292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.033402 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.033522 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.034212 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.034552 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.034989 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.035052 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.035250 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.035388 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.035602 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.035692 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.035797 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.035923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.036393 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:35:01 crc kubenswrapper[4125]: E0312 13:35:01.036923 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.038437 4125 scope.go:117] "RemoveContainer" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.471432 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovnkube-controller/5.log"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.477190 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720"}
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.480365 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.890479 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:01 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:01 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:01 crc kubenswrapper[4125]: I0312 13:35:01.890669 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.025462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.025562 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.025711 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.025729 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.029929 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.030045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.030145 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.030051 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.030367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.030485 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.030577 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.031017 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.031045 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.031192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.031016 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.031298 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.031412 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.031515 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.031539 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.031608 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.031650 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.031728 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.032055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.032329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.032503 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.038945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.039261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.041068 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.043300 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.043788 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.047198 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:35:02 crc kubenswrapper[4125]: E0312 13:35:02.047580 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.885854 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:02 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:02 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:02 crc kubenswrapper[4125]: I0312 13:35:02.886030 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.024922 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.024989 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025064 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025292 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.025315 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025559 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025665 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025662 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025718 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.025626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.025568 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.026262 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.026220 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.026233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.026486 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.026675 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.026743 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.026523 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.026623 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.027175 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.027278 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.027435 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.027627 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.027713 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.027799 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.027653 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.028183 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.028276 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.028358 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.028422 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.028476 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.028499 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.028561 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.028604 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.028563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.028993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.029153 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.029271 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.029481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.029684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.029774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.029873 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.030060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.030324 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.030502 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.030940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031080 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.031182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031329 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031401 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.030957 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031615 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031679 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031745 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.031784 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.032045 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031981 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.031919 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.032414 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.032468 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 12 13:35:03 crc kubenswrapper[4125]: E0312 13:35:03.032533 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.887386 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:03 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:03 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:03 crc kubenswrapper[4125]: I0312 13:35:03.887573 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.026357 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.026429 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.026632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.026708 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.026644 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.027004 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.027140 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.027236 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.027425 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.027501 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.027719 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.027947 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.028168 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.028274 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.028401 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.028426 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.028565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.029067 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.029338 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.029430 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.029575 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.029595 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.029687 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.029931 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.030188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.030391 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.030497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.030634 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.030781 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.031706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.032335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.032481 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:35:04 crc kubenswrapper[4125]: E0312 13:35:04.239557 4125 kubelet.go:2906] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.887684 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:04 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:04 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:04 crc kubenswrapper[4125]: I0312 13:35:04.887979 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.025379 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.025758 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.025791 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.027379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.025940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.025982 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026034 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.027578 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026080 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.027699 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026149 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026182 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026193 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026192 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026220 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.028078 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026230 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026239 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026261 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.028292 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026281 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026276 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026301 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026329 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026321 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026322 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.028663 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026352 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026356 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026371 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.026550 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.026793 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.027960 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.028774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.029015 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.029188 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.029426 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.029588 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.029928 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.030199 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.030378 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.030608 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.030772 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.031144 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.031255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.031406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.031621 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.031987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.031991 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.032219 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.032353 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.032536 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.032760 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.033005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.033274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.033424 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 12 13:35:05 crc kubenswrapper[4125]: E0312 13:35:05.033705 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.923779 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:05 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:05 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:05 crc kubenswrapper[4125]: I0312 13:35:05.924485 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.025577 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.025949 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.026558 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.026719 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.026774 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.027701 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.027704 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.027024 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.027309 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.027480 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.027653 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.027931 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.026958 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.029430 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.030294 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.030778 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.031579 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.031649 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.031722 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.031770 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.032031 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.032158 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.032263 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.032406 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.032516 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.032634 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.031005 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.031030 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:06 crc kubenswrapper[4125]: I0312 13:35:06.031193 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.031323 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.034484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:35:06 crc kubenswrapper[4125]: E0312 13:35:06.034926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.363272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.368350 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365202 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.368675 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365249 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.368958 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365296 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.369217 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365333 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.369443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365373 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.369706 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365408 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.370002 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365444 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.370299 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager/revision-pruner-8-crc" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365481 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.370540 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365521 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.370768 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.366525 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.372441 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.372497 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.372586 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.372626 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.372710 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.372748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.372926 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.372969 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.373044 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.373073 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:11 crc kubenswrapper[4125]: E0312 13:35:11.373192 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.373228 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.373255 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365557 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365592 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365633 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365666 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365702 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365773 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365801 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365886 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365892 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365919 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.384313 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365940 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.384802 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.384998 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385154 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385270 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385321 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385348 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385560 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385637 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.385977 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.386317 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.365966 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.366000 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.386587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.390800 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-kpdvz" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.391147 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.391362 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.366029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.366061 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.366129 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.366170 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.392788 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.393336 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.393464 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.393725 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.394033 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.474270 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.496762 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.497600 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.498032 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.498276 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.498587 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.498804 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.498667 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.499076 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.499244 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.500378 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.501884 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.502154 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.502335 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.502455 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.502600 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc 
kubenswrapper[4125]: I0312 13:35:11.502745 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-dwn4s" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.505945 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507057 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507353 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507482 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507614 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507684 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507731 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507865 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507872 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508005 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508074 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508150 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508266 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508288 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508370 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508401 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508493 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508588 4125 reflector.go:351] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"machine-config-operator-images" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508687 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508728 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508869 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508971 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.509162 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.509280 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.509422 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.509570 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.509691 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.509791 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508500 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.513253 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.507417 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508541 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.508691 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.515664 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.515791 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.516365 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.517132 4125 reflector.go:351] Caches 
populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.517247 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.518481 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.519486 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.519649 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.519781 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.519956 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-sv888" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520125 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520232 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520290 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520338 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520486 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520614 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520757 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.520931 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.521147 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.521390 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.521585 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.521716 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.522309 4125 reflector.go:351] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.528429 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.528737 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.528912 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.529158 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.529693 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.530174 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-twmwc" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.531074 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.531432 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.531632 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.532939 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.533077 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.533231 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.533298 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.538953 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.539697 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.559474 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.611369 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.621074 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.774555 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.803040 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.892249 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.892357 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.897242 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Mar 12 13:35:11 crc kubenswrapper[4125]: I0312 13:35:11.912702 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Mar 12 13:35:12 crc kubenswrapper[4125]: I0312 13:35:12.024467 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Mar 12 13:35:12 crc kubenswrapper[4125]: I0312 13:35:12.887324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:12 crc kubenswrapper[4125]: I0312 13:35:12.887742 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025564 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.026059 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.026060 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025786 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025901 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025764 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025955 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025957 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025965 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025987 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.026005 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.026029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.026016 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.026040 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.025935 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.033445 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.038534 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.038617 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.038661 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.048171 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.048452 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.049044 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.049542 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.049722 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050028 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050245 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050394 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050449 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.049577 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050621 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050032 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.050755 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.051375 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 
13:35:13.061776 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.066880 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.070197 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.070367 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.070426 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.071025 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.071156 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.071165 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.071246 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.072414 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.072595 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.072691 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.073170 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.073273 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.073418 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.073481 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.073551 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.073601 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.074206 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.076052 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Mar 12 13:35:13 crc 
kubenswrapper[4125]: I0312 13:35:13.076221 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.076269 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.076410 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.076693 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"webhook-serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.082411 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.077460 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-dl9g2" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.078448 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.078478 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.078549 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.080737 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.080791 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.088448 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.090778 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.098402 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.891365 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:13 crc kubenswrapper[4125]: I0312 13:35:13.892187 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.525917 4125 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeReady" Mar 12 13:35:14 
crc kubenswrapper[4125]: I0312 13:35:14.652681 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.653249 4125 topology_manager.go:215] "Topology Admit Handler" podUID="973266fa-3775-4a33-9ee8-9af757721a2a" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.654509 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.660302 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.660531 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.667046 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dcqzh"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.667236 4125 topology_manager.go:215] "Topology Admit Handler" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" podNamespace="openshift-marketplace" podName="redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.669054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.673875 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k9qqb"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.674030 4125 topology_manager.go:215] "Topology Admit Handler" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" podNamespace="openshift-marketplace" podName="community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.675222 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.680233 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-7cbd5666ff-bbfrf"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.680388 4125 topology_manager.go:215] "Topology Admit Handler" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" podNamespace="openshift-image-registry" podName="image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.681283 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.685405 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-q786x" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.685419 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g4v97"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.685928 4125 topology_manager.go:215] "Topology Admit Handler" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" podNamespace="openshift-marketplace" podName="certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.687540 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.732242 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dcqzh"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.738605 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g4v97"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.740538 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k9qqb"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.766718 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.790439 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-bound-sa-token\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.790661 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-trusted-ca\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.790794 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-utilities\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.790934 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-catalog-content\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791045 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-catalog-content\") pod \"redhat-operators-dcqzh\" 
(UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791144 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-utilities\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791305 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-utilities\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791428 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-catalog-content\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791557 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/42b6a393-6194-4620-bf8f-7e4b6cbe5679-ca-trust-extracted\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791642 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/973266fa-3775-4a33-9ee8-9af757721a2a-secret-volume\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791751 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt69l\" (UniqueName: \"kubernetes.io/projected/973266fa-3775-4a33-9ee8-9af757721a2a-kube-api-access-nt69l\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791803 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n59fs\" (UniqueName: \"kubernetes.io/projected/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-kube-api-access-n59fs\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.791896 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/973266fa-3775-4a33-9ee8-9af757721a2a-config-volume\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc 
kubenswrapper[4125]: I0312 13:35:14.791972 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzb4s\" (UniqueName: \"kubernetes.io/projected/6db26b71-4e04-4688-a0c0-00e06e8c888d-kube-api-access-nzb4s\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.792044 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-tls\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.792163 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/42b6a393-6194-4620-bf8f-7e4b6cbe5679-installation-pull-secrets\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.792232 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwzcr\" (UniqueName: \"kubernetes.io/projected/bb917686-edfb-4158-86ad-6fce0abec64c-kube-api-access-mwzcr\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.792437 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f9ss\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-kube-api-access-4f9ss\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.792505 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-certificates\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.854188 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-7cbd5666ff-bbfrf"] Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.885274 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.885392 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" 
output="HTTP probe failed with statuscode: 500" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.894183 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nzb4s\" (UniqueName: \"kubernetes.io/projected/6db26b71-4e04-4688-a0c0-00e06e8c888d-kube-api-access-nzb4s\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.894719 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-tls\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.895008 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/42b6a393-6194-4620-bf8f-7e4b6cbe5679-installation-pull-secrets\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.895198 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mwzcr\" (UniqueName: \"kubernetes.io/projected/bb917686-edfb-4158-86ad-6fce0abec64c-kube-api-access-mwzcr\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.895501 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4f9ss\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-kube-api-access-4f9ss\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.895632 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-certificates\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.896388 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-bound-sa-token\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.896715 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-trusted-ca\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897042 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-utilities\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897250 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-catalog-content\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897365 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-certificates\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897427 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-utilities\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897710 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-catalog-content\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897945 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-utilities\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.898188 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-catalog-content\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.898314 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-utilities\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.897951 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-catalog-content\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.898488 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-utilities\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.898673 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-utilities\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.898934 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-catalog-content\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.899223 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/42b6a393-6194-4620-bf8f-7e4b6cbe5679-ca-trust-extracted\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.899329 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-catalog-content\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.899494 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/973266fa-3775-4a33-9ee8-9af757721a2a-secret-volume\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.899530 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/42b6a393-6194-4620-bf8f-7e4b6cbe5679-ca-trust-extracted\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.899922 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nt69l\" (UniqueName: \"kubernetes.io/projected/973266fa-3775-4a33-9ee8-9af757721a2a-kube-api-access-nt69l\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.900077 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n59fs\" (UniqueName: \"kubernetes.io/projected/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-kube-api-access-n59fs\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.900217 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/973266fa-3775-4a33-9ee8-9af757721a2a-config-volume\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.901556 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/973266fa-3775-4a33-9ee8-9af757721a2a-config-volume\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.902341 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-tls\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.902408 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-trusted-ca\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.904643 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/42b6a393-6194-4620-bf8f-7e4b6cbe5679-installation-pull-secrets\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.911547 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/973266fa-3775-4a33-9ee8-9af757721a2a-secret-volume\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.924615 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-bound-sa-token\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.931968 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzb4s\" (UniqueName: \"kubernetes.io/projected/6db26b71-4e04-4688-a0c0-00e06e8c888d-kube-api-access-nzb4s\") pod \"redhat-operators-dcqzh\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.933998 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f9ss\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-kube-api-access-4f9ss\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " 
pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.934262 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwzcr\" (UniqueName: \"kubernetes.io/projected/bb917686-edfb-4158-86ad-6fce0abec64c-kube-api-access-mwzcr\") pod \"certified-operators-g4v97\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.940598 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt69l\" (UniqueName: \"kubernetes.io/projected/973266fa-3775-4a33-9ee8-9af757721a2a-kube-api-access-nt69l\") pod \"collect-profiles-29555370-8n4p8\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.944198 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-n59fs\" (UniqueName: \"kubernetes.io/projected/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-kube-api-access-n59fs\") pod \"community-operators-k9qqb\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:14 crc kubenswrapper[4125]: I0312 13:35:14.992306 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.012036 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.019029 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.045068 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.888499 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:35:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:35:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:35:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.888589 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.946697 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"] Mar 12 13:35:15 crc kubenswrapper[4125]: I0312 13:35:15.946764 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dcqzh"] Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.020386 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g4v97"] Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.024623 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k9qqb"] Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.394170 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" event={"ID":"973266fa-3775-4a33-9ee8-9af757721a2a","Type":"ContainerStarted","Data":"0260ff3055ca485655dfa3a62ea6cb1042f69d0c1b0e8a1d75401c5004d0a556"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.394495 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" event={"ID":"973266fa-3775-4a33-9ee8-9af757721a2a","Type":"ContainerStarted","Data":"f705d6e8cd58efcbc5c53b3764c7921f53160a374181083c67a45bb0406ebf86"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.398290 4125 generic.go:334] "Generic (PLEG): container finished" podID="bb917686-edfb-4158-86ad-6fce0abec64c" containerID="8066f3b20a26cdaa33f85973f1b4e4f5fa47ef57fcee496150d1801066272f41" exitCode=0 Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.398397 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerDied","Data":"8066f3b20a26cdaa33f85973f1b4e4f5fa47ef57fcee496150d1801066272f41"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.398445 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerStarted","Data":"2763af634f4d669b92332b1dbf6affd01dcf18c5923f4fea2204666a30c0374c"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.403381 4125 generic.go:334] "Generic (PLEG): container finished" podID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerID="56a5bc6b4dee41dcddef32e21164697c824160c5a4c34705a0ad0d4b6748294b" exitCode=0 Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.403497 4125 kubelet.go:2461] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerDied","Data":"56a5bc6b4dee41dcddef32e21164697c824160c5a4c34705a0ad0d4b6748294b"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.403534 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerStarted","Data":"049cfa0c7f506db7d8a3962a506f6c7dbfe4b3489281fd37180c3bf06491ac72"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.406941 4125 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.412978 4125 generic.go:334] "Generic (PLEG): container finished" podID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerID="6b4d739a1f1a1f43a1f2f89445711eaf7d44a20124dedaea0758ded72747d1a0" exitCode=0 Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.413046 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerDied","Data":"6b4d739a1f1a1f43a1f2f89445711eaf7d44a20124dedaea0758ded72747d1a0"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.413073 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerStarted","Data":"696d1fd3341d44c6b87bdc93d6ff15956037aa2458e9b94b4b68632fe8229857"} Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.435854 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" podStartSLOduration=316.43523416 podStartE2EDuration="5m16.43523416s" podCreationTimestamp="2026-03-12 13:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:35:16.431555235 +0000 UTC m=+886.754941144" watchObservedRunningTime="2026-03-12 13:35:16.43523416 +0000 UTC m=+886.758620029" Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.576949 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.577044 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.577383 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-nzb4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dcqzh_openshift-marketplace(6db26b71-4e04-4688-a0c0-00e06e8c888d): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.577454 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578388 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578476 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578584 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n59fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-k9qqb_openshift-marketplace(ccdf38cf-634a-41a2-9c8b-74bb86af80a7): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578636 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578399 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578680 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.578775 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-mwzcr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-g4v97_openshift-marketplace(bb917686-edfb-4158-86ad-6fce0abec64c): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:16 crc kubenswrapper[4125]: E0312 13:35:16.580410 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.886187 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:16 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:16 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:16 crc kubenswrapper[4125]: I0312 13:35:16.886419 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:17 crc kubenswrapper[4125]: E0312 13:35:17.423377 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:35:17 crc kubenswrapper[4125]: E0312 13:35:17.423518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:35:17 crc kubenswrapper[4125]: E0312 13:35:17.423601 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:35:17 crc kubenswrapper[4125]: I0312 13:35:17.886401 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:17 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:17 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:17 crc kubenswrapper[4125]: I0312 13:35:17.886494 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:18 crc kubenswrapper[4125]: I0312 13:35:18.428718 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" event={"ID":"973266fa-3775-4a33-9ee8-9af757721a2a","Type":"ContainerDied","Data":"0260ff3055ca485655dfa3a62ea6cb1042f69d0c1b0e8a1d75401c5004d0a556"}
Mar 12 13:35:18 crc kubenswrapper[4125]: I0312 13:35:18.428643 4125 generic.go:334] "Generic (PLEG): container finished" podID="973266fa-3775-4a33-9ee8-9af757721a2a" containerID="0260ff3055ca485655dfa3a62ea6cb1042f69d0c1b0e8a1d75401c5004d0a556" exitCode=0
Mar 12 13:35:18 crc kubenswrapper[4125]: I0312 13:35:18.888173 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:18 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:18 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:18 crc kubenswrapper[4125]: I0312 13:35:18.888310 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.738193 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.885944 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:19 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:19 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.886128 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.902623 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/973266fa-3775-4a33-9ee8-9af757721a2a-config-volume\") pod \"973266fa-3775-4a33-9ee8-9af757721a2a\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") "
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.902940 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/973266fa-3775-4a33-9ee8-9af757721a2a-secret-volume\") pod \"973266fa-3775-4a33-9ee8-9af757721a2a\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") "
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.903121 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt69l\" (UniqueName: \"kubernetes.io/projected/973266fa-3775-4a33-9ee8-9af757721a2a-kube-api-access-nt69l\") pod \"973266fa-3775-4a33-9ee8-9af757721a2a\" (UID: \"973266fa-3775-4a33-9ee8-9af757721a2a\") "
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.904050 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/973266fa-3775-4a33-9ee8-9af757721a2a-config-volume" (OuterVolumeSpecName: "config-volume") pod "973266fa-3775-4a33-9ee8-9af757721a2a" (UID: "973266fa-3775-4a33-9ee8-9af757721a2a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.905724 4125 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/973266fa-3775-4a33-9ee8-9af757721a2a-config-volume\") on node \"crc\" DevicePath \"\""
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.909324 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/973266fa-3775-4a33-9ee8-9af757721a2a-kube-api-access-nt69l" (OuterVolumeSpecName: "kube-api-access-nt69l") pod "973266fa-3775-4a33-9ee8-9af757721a2a" (UID: "973266fa-3775-4a33-9ee8-9af757721a2a"). InnerVolumeSpecName "kube-api-access-nt69l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:35:19 crc kubenswrapper[4125]: I0312 13:35:19.910990 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/973266fa-3775-4a33-9ee8-9af757721a2a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "973266fa-3775-4a33-9ee8-9af757721a2a" (UID: "973266fa-3775-4a33-9ee8-9af757721a2a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.008789 4125 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/973266fa-3775-4a33-9ee8-9af757721a2a-secret-volume\") on node \"crc\" DevicePath \"\""
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.008887 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nt69l\" (UniqueName: \"kubernetes.io/projected/973266fa-3775-4a33-9ee8-9af757721a2a-kube-api-access-nt69l\") on node \"crc\" DevicePath \"\""
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.454069 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8" event={"ID":"973266fa-3775-4a33-9ee8-9af757721a2a","Type":"ContainerDied","Data":"f705d6e8cd58efcbc5c53b3764c7921f53160a374181083c67a45bb0406ebf86"}
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.454172 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f705d6e8cd58efcbc5c53b3764c7921f53160a374181083c67a45bb0406ebf86"
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.454417 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.886717 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:20 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:20 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:20 crc kubenswrapper[4125]: I0312 13:35:20.886962 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:21 crc kubenswrapper[4125]: I0312 13:35:21.885553 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:35:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:35:21 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:35:21 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:35:21 crc kubenswrapper[4125]: I0312 13:35:21.885675 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:35:21 crc kubenswrapper[4125]: I0312 13:35:21.885730 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 12 13:35:21 crc kubenswrapper[4125]: I0312 13:35:21.887796 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6"} pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" containerMessage="Container router failed startup probe, will be restarted"
Mar 12 13:35:21 crc kubenswrapper[4125]: I0312 13:35:21.887945 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" containerID="cri-o://040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6" gracePeriod=3600
Mar 12 13:35:28 crc kubenswrapper[4125]: E0312 13:35:28.146466 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:35:28 crc kubenswrapper[4125]: E0312 13:35:28.147057 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:35:28 crc kubenswrapper[4125]: E0312 13:35:28.147329 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-mwzcr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-g4v97_openshift-marketplace(bb917686-edfb-4158-86ad-6fce0abec64c): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:28 crc kubenswrapper[4125]: E0312 13:35:28.147420 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:35:31 crc kubenswrapper[4125]: E0312 13:35:31.142933 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16"
Mar 12 13:35:31 crc kubenswrapper[4125]: E0312 13:35:31.143021 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16"
Mar 12 13:35:31 crc kubenswrapper[4125]: E0312 13:35:31.143225 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-nzb4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dcqzh_openshift-marketplace(6db26b71-4e04-4688-a0c0-00e06e8c888d): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:31 crc kubenswrapper[4125]: E0312 13:35:31.143288 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:35:31 crc kubenswrapper[4125]: I0312 13:35:31.428498 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:35:31 crc kubenswrapper[4125]: I0312 13:35:31.428592 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:35:31 crc kubenswrapper[4125]: I0312 13:35:31.428618 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:35:31 crc kubenswrapper[4125]: I0312 13:35:31.428655 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:35:31 crc kubenswrapper[4125]: I0312 13:35:31.428678 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:35:32 crc kubenswrapper[4125]: E0312 13:35:32.561473 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:35:32 crc kubenswrapper[4125]: E0312 13:35:32.562322 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:35:32 crc kubenswrapper[4125]: E0312 13:35:32.563286 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n59fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-k9qqb_openshift-marketplace(ccdf38cf-634a-41a2-9c8b-74bb86af80a7): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:32 crc kubenswrapper[4125]: E0312 13:35:32.563531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:35:39 crc kubenswrapper[4125]: E0312 13:35:39.029469 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:35:46 crc kubenswrapper[4125]: E0312 13:35:46.263750 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:35:47 crc kubenswrapper[4125]: E0312 13:35:47.030423 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:35:50 crc kubenswrapper[4125]: E0312 13:35:50.159420 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:35:50 crc kubenswrapper[4125]: E0312 13:35:50.160375 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:35:50 crc kubenswrapper[4125]: E0312 13:35:50.160602 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-mwzcr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-g4v97_openshift-marketplace(bb917686-edfb-4158-86ad-6fce0abec64c): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:35:50 crc kubenswrapper[4125]: E0312 13:35:50.160688 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.613525 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.618331 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.636723 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.828397 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.828693 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.828929 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.829009 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.829067 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.829249 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.829315 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.829557 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.829774 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.830044 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.830250 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.830406 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.834986 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.835233 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.836075 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.836229 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.836466 4125 reflector.go:351] Caches populated 
for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.836713 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838186 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838506 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838579 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838627 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838715 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838758 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838890 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.838944 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839049 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839094 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839201 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839264 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839302 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839340 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839378 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839416 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: 
\"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839475 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839516 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839551 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839588 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839636 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839902 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.839970 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.840087 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: 
\"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.841699 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.842265 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.842670 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.842759 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.843204 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.843441 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.843508 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.843672 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.844048 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.853456 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.855646 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.862725 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.865403 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.866083 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.866770 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.868163 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.865536 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.865607 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870292 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870335 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870433 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870505 4125 
reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870584 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870624 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870688 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870744 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870750 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870799 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.870943 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.871042 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.871243 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.871309 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.871315 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.876167 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.877797 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.879800 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.871375 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.887151 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"webhook-serving-cert" Mar 12 13:35:52 crc 
kubenswrapper[4125]: I0312 13:35:52.888097 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.888434 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.892269 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.892385 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.893772 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.899103 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.896592 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872423 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872470 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872478 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872503 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872539 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872655 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872733 4125 reflector.go:351] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"mco-proxy-tls" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872793 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872995 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.873088 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.876100 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.896650 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.897769 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.903782 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.872308 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.907029 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.908573 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.910142 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.910336 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.910932 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.913594 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.915043 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.915529 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.915620 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.915906 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.916207 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.916421 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.917636 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.918504 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.918390 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.919241 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.919515 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.920033 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.920455 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.920948 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.921601 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.923977 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.925060 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.926723 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.927455 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.927535 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.929193 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.930188 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.930111 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.931024 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.931800 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: 
\"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.936890 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.938190 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.938237 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.944220 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945403 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945465 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945512 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945540 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945571 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945603 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945631 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945662 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945687 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945711 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945737 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945763 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " 
pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945797 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945880 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945909 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945937 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945962 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.945988 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946013 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946037 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946064 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946138 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946177 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946228 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946250 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946279 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946303 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946365 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946391 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 
crc kubenswrapper[4125]: I0312 13:35:52.946425 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946450 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946487 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946512 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946535 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946605 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946632 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946654 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946698 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946729 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946789 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946886 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946933 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.946977 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947005 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947045 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947079 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: 
\"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947141 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947208 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947231 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947253 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947275 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947296 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947322 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947346 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947370 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947393 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947416 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947440 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947462 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.947486 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.953434 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:52 crc kubenswrapper[4125]: E0312 13:35:52.963616 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. 
No retries permitted until 2026-03-12 13:37:54.963599873 +0000 UTC m=+1045.286985762 (durationBeforeRetry 2m2s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-585546dd8b-v5m4t" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.969457 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.969708 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.970199 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.970654 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.970931 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.973951 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.974197 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.974371 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.974511 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.974667 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.976967 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.979166 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.980100 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.983107 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.983522 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.984221 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.984395 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.984475 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.984927 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.984951 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.987576 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.987913 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.987991 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.988319 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: 
\"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.988633 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.988934 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.989104 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.989684 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.989778 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990000 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990030 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990174 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990245 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990286 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990419 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990540 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Mar 12 
13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.991004 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.991277 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.991676 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.991890 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992019 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992178 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992371 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992446 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.991686 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992510 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990553 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992578 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992401 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992633 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992670 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992539 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992725 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992776 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.992786 4125 
reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990763 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.993335 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.990540 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.993703 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.993886 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.995483 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.997427 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.996029 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.996989 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.997334 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") 
pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.997557 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.997578 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.998064 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.998277 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.998371 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:52 crc kubenswrapper[4125]: I0312 13:35:52.999037 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.000331 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.000754 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.002333 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.003762 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.004441 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.004571 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.008227 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.008897 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.009219 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.009455 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.009762 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.010191 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.010908 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.011336 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.011552 4125 reflector.go:351] Caches populated for *v1.Secret from 
object-"openshift-multus"/"metrics-daemon-secret" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.009469 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.008914 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") " pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.012543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.027964 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.028737 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.013525 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.016376 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.033359 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.038512 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.034489 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.018621 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.022909 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.026602 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.034996 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.035299 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.035688 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.035795 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.036576 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.017767 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.037005 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.037650 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.038162 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.042344 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.043523 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.044213 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.047642 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.047708 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.047754 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"service-ca-666f99b6f-vlbxv\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") " pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.048016 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.048214 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.048598 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.048703 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.050453 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.050752 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 
13:35:53.050788 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.050866 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.051887 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.052215 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.056850 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.059233 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.059609 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.060201 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.060427 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.060559 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.060237 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.061335 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.061601 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.061770 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.061985 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.062649 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.063015 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.063988 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064059 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064096 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064168 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064201 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064229 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064257 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064283 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064328 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064356 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064379 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064434 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064475 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064499 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064524 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.064547 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.066401 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Mar 12 13:35:53 crc 
kubenswrapper[4125]: I0312 13:35:53.074481 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.074752 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.075350 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.075494 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.075515 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.075580 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.076151 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.078244 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.078944 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.079687 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.080376 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.081334 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.084543 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.085208 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.086617 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.086961 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"image-registry-585546dd8b-v5m4t\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") " pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.088795 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.090190 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.090403 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.092151 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"controller-manager-6ff78978b4-q4vv8\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.101639 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.103673 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.104780 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.114277 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.118998 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.167198 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.169960 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.170504 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.170876 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.173177 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.173212 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.180028 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.173393 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.185065 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.174350 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.174442 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.199893 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.200606 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.203602 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.207974 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.209384 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.212871 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.212603 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.221789 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.221797 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.223857 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.234602 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.239307 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.254627 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.265706 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.267494 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.270925 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.271028 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.273195 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.276592 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.277621 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.277697 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.281713 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"redhat-marketplace-rmwfn\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.284800 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.292185 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.303372 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.311753 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.319913 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.320345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.332511 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.336742 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.341426 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.356061 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.368606 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.371241 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.391676 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.394617 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.414020 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.433323 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.434531 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"route-controller-manager-5c4dbb8899-tchz5\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.449969 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.453588 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-dl9g2" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.460040 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.474985 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.496957 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.515681 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.531183 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"apiserver-67cbf64bc9-mtx25\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.533259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.534215 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.553096 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.572036 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.574396 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.576073 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.582451 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.589524 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"console-84fccc7b6-mkncc\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.595019 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-kpdvz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.595349 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.595499 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.598051 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.604354 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.613503 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.629733 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.633394 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.654413 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.655749 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.671683 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"oauth-openshift-765b47f944-n2lhl\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.702227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.711928 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.726260 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.741302 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.764927 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.781730 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.791508 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.846100 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:35:53 crc kubenswrapper[4125]: I0312 13:35:53.851157 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 12 13:35:54 crc kubenswrapper[4125]: W0312 13:35:54.585261 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb54e8941_2fc4_432a_9e51_39684df9089e.slice/crio-bf33a6f3417637c3ec5a7735974a998f197c0906ebf819399ad5786d84f0ca49 WatchSource:0}: Error finding container bf33a6f3417637c3ec5a7735974a998f197c0906ebf819399ad5786d84f0ca49: Status 404 returned error can't find the container with id bf33a6f3417637c3ec5a7735974a998f197c0906ebf819399ad5786d84f0ca49 Mar 12 13:35:54 crc kubenswrapper[4125]: W0312 13:35:54.777889 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1620f19_8aa3_45cf_931b_7ae0e5cd14cf.slice/crio-d08ab276634a556a99e5389bb4b6df953e2d5c10de8c6b811b6fb6bb9c8e7d8b WatchSource:0}: Error finding container d08ab276634a556a99e5389bb4b6df953e2d5c10de8c6b811b6fb6bb9c8e7d8b: Status 404 returned error can't find the container with id d08ab276634a556a99e5389bb4b6df953e2d5c10de8c6b811b6fb6bb9c8e7d8b Mar 12 13:35:54 crc kubenswrapper[4125]: W0312 13:35:54.849061 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f394926_bdb9_425c_b36e_264d7fd34550.slice/crio-ddc027da674113ccceaaa3a9d6336de02f530f0f7dbbb4e6e22574300507687b WatchSource:0}: Error finding container ddc027da674113ccceaaa3a9d6336de02f530f0f7dbbb4e6e22574300507687b: Status 404 returned error can't find the container with id ddc027da674113ccceaaa3a9d6336de02f530f0f7dbbb4e6e22574300507687b Mar 12 13:35:55 crc kubenswrapper[4125]: I0312 13:35:55.780066 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"ac67d38edf96fe4943bf8a96cd033f7facfd5be55e5a851dbb1f658a06f60bb5"} Mar 12 13:35:55 crc kubenswrapper[4125]: I0312 13:35:55.797212 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" event={"ID":"b54e8941-2fc4-432a-9e51-39684df9089e","Type":"ContainerStarted","Data":"bf33a6f3417637c3ec5a7735974a998f197c0906ebf819399ad5786d84f0ca49"} Mar 12 13:35:55 crc kubenswrapper[4125]: I0312 13:35:55.838229 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" event={"ID":"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf","Type":"ContainerStarted","Data":"d08ab276634a556a99e5389bb4b6df953e2d5c10de8c6b811b6fb6bb9c8e7d8b"} Mar 12 13:35:55 crc kubenswrapper[4125]: I0312 13:35:55.860554 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" event={"ID":"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be","Type":"ContainerStarted","Data":"9f2801fe198c89e7b3a0c6c80ee711b6a61e3a0770f626597be78340dd378b1b"} Mar 12 13:35:55 crc kubenswrapper[4125]: I0312 13:35:55.889549 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" 
event={"ID":"0f394926-bdb9-425c-b36e-264d7fd34550","Type":"ContainerStarted","Data":"ddc027da674113ccceaaa3a9d6336de02f530f0f7dbbb4e6e22574300507687b"} Mar 12 13:35:56 crc kubenswrapper[4125]: W0312 13:35:56.316192 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9127708_ccfd_4891_8a3a_f0cacb77e0f4.slice/crio-195d9ce880ffc37e7906b28169daaecc3552857ad7a6692b2af123984d0df3fc WatchSource:0}: Error finding container 195d9ce880ffc37e7906b28169daaecc3552857ad7a6692b2af123984d0df3fc: Status 404 returned error can't find the container with id 195d9ce880ffc37e7906b28169daaecc3552857ad7a6692b2af123984d0df3fc Mar 12 13:35:56 crc kubenswrapper[4125]: I0312 13:35:56.976009 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" event={"ID":"378552fd-5e53-4882-87ff-95f3d9198861","Type":"ContainerStarted","Data":"937398981e25eb6e4d38bb4f0d8fb8dc6dcc16b27b3d8c22df919194b9f3a260"} Mar 12 13:35:57 crc kubenswrapper[4125]: I0312 13:35:57.025248 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" event={"ID":"4f8aa612-9da0-4a2b-911e-6a1764a4e74e","Type":"ContainerStarted","Data":"dd020996c3aea9386649cf412c02fbc511d7fd83525e992b892f43c41ad030a8"} Mar 12 13:35:57 crc kubenswrapper[4125]: I0312 13:35:57.050450 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" event={"ID":"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7","Type":"ContainerStarted","Data":"b2bad55520b2310e6b01036595635f6bc7b7be6184229cbef3f82dff3fc0c63f"} Mar 12 13:35:57 crc kubenswrapper[4125]: I0312 13:35:57.098166 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"195d9ce880ffc37e7906b28169daaecc3552857ad7a6692b2af123984d0df3fc"} Mar 12 13:35:57 crc kubenswrapper[4125]: I0312 13:35:57.121335 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" event={"ID":"c085412c-b875-46c9-ae3e-e6b0d8067091","Type":"ContainerStarted","Data":"272209d89a7757e0c8cb376aa0b09015d9ac2b465a8db1f7ba16cd63a3a6933d"} Mar 12 13:35:57 crc kubenswrapper[4125]: I0312 13:35:57.135252 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" event={"ID":"297ab9b6-2186-4d5b-a952-2bfd59af63c4","Type":"ContainerStarted","Data":"75322feb11b9d9a38f45ccfcb60d373ddc362a0dc92dcde70aae0f55e465930d"} Mar 12 13:35:57 crc kubenswrapper[4125]: W0312 13:35:57.156431 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda702c6d2_4dde_4077_ab8c_0f8df804bf7a.slice/crio-aaadefd723f23405112ee64e3125963e59406d825f3cf2120816bad4d65c9c68 WatchSource:0}: Error finding container aaadefd723f23405112ee64e3125963e59406d825f3cf2120816bad4d65c9c68: Status 404 returned error can't find the container with id aaadefd723f23405112ee64e3125963e59406d825f3cf2120816bad4d65c9c68 Mar 12 13:35:57 crc kubenswrapper[4125]: W0312 13:35:57.187493 4125 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87df87f4_ba66_4137_8e41_1fa632ad4207.slice/crio-cb5d563ba52a01dfa56f9b27f2e068ff2ef322ed159c035ea3a168b811ea8ea4 WatchSource:0}: Error finding container cb5d563ba52a01dfa56f9b27f2e068ff2ef322ed159c035ea3a168b811ea8ea4: Status 404 returned error can't find the container with id cb5d563ba52a01dfa56f9b27f2e068ff2ef322ed159c035ea3a168b811ea8ea4
Mar 12 13:35:57 crc kubenswrapper[4125]: W0312 13:35:57.208439 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e733dd_0939_4f1b_9cbb_13897e093787.slice/crio-ec5faddf76e6fc12ba011540d2875c0469e7ae172bc91ce14332f2c6f3b7e8cc WatchSource:0}: Error finding container ec5faddf76e6fc12ba011540d2875c0469e7ae172bc91ce14332f2c6f3b7e8cc: Status 404 returned error can't find the container with id ec5faddf76e6fc12ba011540d2875c0469e7ae172bc91ce14332f2c6f3b7e8cc
Mar 12 13:35:57 crc kubenswrapper[4125]: W0312 13:35:57.216090 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd556935_a077_45df_ba3f_d42c39326ccd.slice/crio-99684c7d505e8b4ef69098093612102b643f76dd4578412dc43812a91129270b WatchSource:0}: Error finding container 99684c7d505e8b4ef69098093612102b643f76dd4578412dc43812a91129270b: Status 404 returned error can't find the container with id 99684c7d505e8b4ef69098093612102b643f76dd4578412dc43812a91129270b
Mar 12 13:35:57 crc kubenswrapper[4125]: W0312 13:35:57.249064 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod530553aa_0a1d_423e_8a22_f5eb4bdbb883.slice/crio-dade2d6eec86689f33031a1140c31aa267005e17667a55602a1f4ecc8788de8e WatchSource:0}: Error finding container dade2d6eec86689f33031a1140c31aa267005e17667a55602a1f4ecc8788de8e: Status 404 returned error can't find the container with id dade2d6eec86689f33031a1140c31aa267005e17667a55602a1f4ecc8788de8e
Mar 12 13:35:57 crc kubenswrapper[4125]: W0312 13:35:57.491355 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5025cb4_ddb0_4107_88c1_bcbcdb779ac0.slice/crio-c62388eefa1b206fa51288979472b0ed82401b2af5ee151bb4a5b872d820f53b WatchSource:0}: Error finding container c62388eefa1b206fa51288979472b0ed82401b2af5ee151bb4a5b872d820f53b: Status 404 returned error can't find the container with id c62388eefa1b206fa51288979472b0ed82401b2af5ee151bb4a5b872d820f53b
Mar 12 13:35:58 crc kubenswrapper[4125]: W0312 13:35:58.010626 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded024e5d_8fc2_4c22_803d_73f3c9795f19.slice/crio-83b38597df46d04c6f97c74456e47b9ef92fce65803772b4d5390222d1cbe304 WatchSource:0}: Error finding container 83b38597df46d04c6f97c74456e47b9ef92fce65803772b4d5390222d1cbe304: Status 404 returned error can't find the container with id 83b38597df46d04c6f97c74456e47b9ef92fce65803772b4d5390222d1cbe304
Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.153015 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" event={"ID":"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0","Type":"ContainerStarted","Data":"c62388eefa1b206fa51288979472b0ed82401b2af5ee151bb4a5b872d820f53b"}
Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.170442 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"fe0b803c41d86148e7cd88e63106061ff21bc69e2fbc55e9e8eed7919ccb1968"}
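The manager.go:1169 warnings above come from the kubelet's embedded cAdvisor: a cgroup watch event fires for a crio container that is still being created or already torn down, and by the time the watcher looks the container up it is unknown, so the lookup returns "Status 404 ... can't find the container" and the event is dropped. A minimal illustrative Go sketch of that read-after-event race follows; the cgroup path is an assumption for illustration, not taken from this log.

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    )

    // inspect mimics what a cgroup watcher does after a watch event fires:
    // it re-reads the directory that triggered the event. Between the event
    // and the read, the runtime may have already removed the cgroup (or not
    // yet registered the container), which is the race behind the
    // "Status 404 ... can't find the container with id ..." warnings above.
    func inspect(cgroupPath string) error {
    	entries, err := os.ReadDir(cgroupPath)
    	if errors.Is(err, os.ErrNotExist) {
    		// cAdvisor treats this case as a warning and drops the event.
    		return fmt.Errorf("container gone before inspection: %w", err)
    	}
    	if err != nil {
    		return err
    	}
    	fmt.Printf("%s: %d entries\n", cgroupPath, len(entries))
    	return nil
    }

    func main() {
    	// Hypothetical path shaped like the crio cgroups in the log.
    	path := "/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice"
    	if err := inspect(path); err != nil {
    		fmt.Println("warning:", err)
    	}
    }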
for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"fe0b803c41d86148e7cd88e63106061ff21bc69e2fbc55e9e8eed7919ccb1968"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.193096 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerStarted","Data":"8e80ff4c3ba76dbcbe316dbb771e271a95dbcfe4084cc2b59b11e921779c814f"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.197696 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"b0d8051a2b4037af97790d5856acdaecbe8bf0ebf5044a56acbe3b73c38a5f3c"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.209555 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" event={"ID":"59748b9b-c309-4712-aa85-bb38d71c4915","Type":"ContainerStarted","Data":"ad8a91b9a744ad093b7cd0361990c3d6dfb74245ab133add323e17a1fcc6b3d0"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.211781 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" event={"ID":"8a5ae51d-d173-4531-8975-f164c975ce1f","Type":"ContainerStarted","Data":"f76b0ebb8c571e76990bc2e3f0835780b8371ba94434a5df87c4bd7077a4fb4e"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.220088 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" event={"ID":"a702c6d2-4dde-4077-ab8c-0f8df804bf7a","Type":"ContainerStarted","Data":"aaadefd723f23405112ee64e3125963e59406d825f3cf2120816bad4d65c9c68"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.227099 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" event={"ID":"0b5c38ff-1fa8-4219-994d-15776acd4a4d","Type":"ContainerStarted","Data":"af6b1db94c4dcd6058784a8bec0c9c3515af31d0d0a124804417edcc6fd7d0d2"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.253108 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerStarted","Data":"04ea3a247d56e8e9fb5ed0dbb7a932f44e7a28d7ed36991363238ab5d54920bc"} Mar 12 13:35:58 crc kubenswrapper[4125]: W0312 13:35:58.272911 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc782cf62_a827_4677_b3c2_6f82c5f09cbb.slice/crio-7dad7f0dd93fcfa7265f11fe69e573aaba7634cae7dc640cb99cc4829f2982ba WatchSource:0}: Error finding container 7dad7f0dd93fcfa7265f11fe69e573aaba7634cae7dc640cb99cc4829f2982ba: Status 404 returned error can't find the container with id 7dad7f0dd93fcfa7265f11fe69e573aaba7634cae7dc640cb99cc4829f2982ba Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.281694 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"a678acb867b5055ee7744bd020f55f9e946944ba47268c681705e7f493ef2e91"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.304307 4125 kubelet.go:2461] "SyncLoop (PLEG): event 
for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" event={"ID":"d0f40333-c860-4c04-8058-a0bf572dcf12","Type":"ContainerStarted","Data":"4ff4e528143b4cca1ec3bf26c2a252b892b3f28bb7af10db3fa831b2f8cfa4fa"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.308738 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" event={"ID":"ed024e5d-8fc2-4c22-803d-73f3c9795f19","Type":"ContainerStarted","Data":"83b38597df46d04c6f97c74456e47b9ef92fce65803772b4d5390222d1cbe304"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.311927 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" event={"ID":"34a48baf-1bee-4921-8bb2-9b7320e76f79","Type":"ContainerStarted","Data":"bbe62c4720d6d51d35f89cc3be513d40369e8f029ca2b82ca51c1d17e930111b"} Mar 12 13:35:58 crc kubenswrapper[4125]: W0312 13:35:58.320938 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4092a9f8_5acc_4932_9e90_ef962eeb301a.slice/crio-73384dfe8a881d17e3e6bdb1913af86efff33dd52e3a2f6dedd23aa02e70e32d WatchSource:0}: Error finding container 73384dfe8a881d17e3e6bdb1913af86efff33dd52e3a2f6dedd23aa02e70e32d: Status 404 returned error can't find the container with id 73384dfe8a881d17e3e6bdb1913af86efff33dd52e3a2f6dedd23aa02e70e32d Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.348950 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" event={"ID":"43ae1c37-047b-4ee2-9fee-41e337dd4ac8","Type":"ContainerStarted","Data":"cc9d02fa981661be749ffb61fa48602a64ec7dea4882fb6de5491bc77f8e86eb"} Mar 12 13:35:58 crc kubenswrapper[4125]: W0312 13:35:58.387079 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb233d916_bfe3_4ae5_ae39_6b574d1aa05e.slice/crio-116f902f7ec1a29ab141a90c55ec8e2238c0df7e80947d26572239e3bf033ed1 WatchSource:0}: Error finding container 116f902f7ec1a29ab141a90c55ec8e2238c0df7e80947d26572239e3bf033ed1: Status 404 returned error can't find the container with id 116f902f7ec1a29ab141a90c55ec8e2238c0df7e80947d26572239e3bf033ed1 Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.419474 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" event={"ID":"87df87f4-ba66-4137-8e41-1fa632ad4207","Type":"ContainerStarted","Data":"cb5d563ba52a01dfa56f9b27f2e068ff2ef322ed159c035ea3a168b811ea8ea4"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.432495 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" event={"ID":"bd556935-a077-45df-ba3f-d42c39326ccd","Type":"ContainerStarted","Data":"99684c7d505e8b4ef69098093612102b643f76dd4578412dc43812a91129270b"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.449500 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"dade2d6eec86689f33031a1140c31aa267005e17667a55602a1f4ecc8788de8e"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.451605 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" 
event={"ID":"5bacb25d-97b6-4491-8fb4-99feae1d802a","Type":"ContainerStarted","Data":"e1b7bca120e58e120c3636ddd061ce7ca33a33c37f86a4d15576973cc3d112d8"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.458063 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"ec5faddf76e6fc12ba011540d2875c0469e7ae172bc91ce14332f2c6f3b7e8cc"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.462089 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gbw49" event={"ID":"13045510-8717-4a71-ade4-be95a76440a7","Type":"ContainerStarted","Data":"57a3b9d1ec262d09a3fcab628111d5dbb6fb68531995cf38d0201e4b379e7d1e"} Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.463020 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerStarted","Data":"f30c793f76e8e07ac69612c64aed9cdebf2149137d05232b2ba17531ce27aa5d"} Mar 12 13:35:58 crc kubenswrapper[4125]: W0312 13:35:58.546295 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10603adc_d495_423c_9459_4caa405960bb.slice/crio-974bbbb412cef619b7ae370ba9bcd495dd86cf992b76f0af39e2d51d861dba84 WatchSource:0}: Error finding container 974bbbb412cef619b7ae370ba9bcd495dd86cf992b76f0af39e2d51d861dba84: Status 404 returned error can't find the container with id 974bbbb412cef619b7ae370ba9bcd495dd86cf992b76f0af39e2d51d861dba84 Mar 12 13:35:58 crc kubenswrapper[4125]: W0312 13:35:58.610515 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b5d722a_1123_4935_9740_52a08d018bc9.slice/crio-100c36f5259735a2e79225d6611a629f1dee36d06f332e30d1667d47502c59d5 WatchSource:0}: Error finding container 100c36f5259735a2e79225d6611a629f1dee36d06f332e30d1667d47502c59d5: Status 404 returned error can't find the container with id 100c36f5259735a2e79225d6611a629f1dee36d06f332e30d1667d47502c59d5 Mar 12 13:35:58 crc kubenswrapper[4125]: I0312 13:35:58.739508 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-8-crc"] Mar 12 13:35:59 crc kubenswrapper[4125]: W0312 13:35:59.193321 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebf09b15_4bb1_44bf_9d54_e76fad5cf76e.slice/crio-c2c4d85cfa8a90f83e0f80c95fc3e048fef0cb72f6f5ff0cdc816fc4a345f1b3 WatchSource:0}: Error finding container c2c4d85cfa8a90f83e0f80c95fc3e048fef0cb72f6f5ff0cdc816fc4a345f1b3: Status 404 returned error can't find the container with id c2c4d85cfa8a90f83e0f80c95fc3e048fef0cb72f6f5ff0cdc816fc4a345f1b3 Mar 12 13:35:59 crc kubenswrapper[4125]: E0312 13:35:59.500098 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:35:59 crc kubenswrapper[4125]: E0312 13:35:59.500480 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:35:59 crc kubenswrapper[4125]: E0312 13:35:59.500587 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-nzb4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dcqzh_openshift-marketplace(6db26b71-4e04-4688-a0c0-00e06e8c888d): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:35:59 crc kubenswrapper[4125]: E0312 13:35:59.500647 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
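The three E-level entries above are one failure reported at three layers (CRI image pull, kubelet image manager, pod worker): the node has no valid pull secret for registry.redhat.io. On the API side the same condition surfaces as a waiting init container with reason ErrImagePull or ImagePullBackOff. A hedged client-go sketch that lists pods stuck this way; the kubeconfig location and the openshift-marketplace namespace are assumptions for illustration:

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Assumption: a local kubeconfig; inside a pod you would use
    	// rest.InClusterConfig() instead.
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	client, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	// openshift-marketplace is where the failing catalog pods above live.
    	pods, err := client.CoreV1().Pods("openshift-marketplace").List(context.TODO(), metav1.ListOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, pod := range pods.Items {
    		// Check init containers (extract-content above is one) and
    		// regular containers for image-pull wait states.
    		for _, st := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) {
    			if w := st.State.Waiting; w != nil && (w.Reason == "ErrImagePull" || w.Reason == "ImagePullBackOff") {
    				fmt.Printf("%s/%s container %s: %s: %s\n", pod.Namespace, pod.Name, st.Name, w.Reason, w.Message)
    			}
    		}
    	}
    }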
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.502074 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84fccc7b6-mkncc" event={"ID":"b233d916-bfe3-4ae5-ae39-6b574d1aa05e","Type":"ContainerStarted","Data":"116f902f7ec1a29ab141a90c55ec8e2238c0df7e80947d26572239e3bf033ed1"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.508029 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" event={"ID":"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e","Type":"ContainerStarted","Data":"c2c4d85cfa8a90f83e0f80c95fc3e048fef0cb72f6f5ff0cdc816fc4a345f1b3"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.524559 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" event={"ID":"10603adc-d495-423c-9459-4caa405960bb","Type":"ContainerStarted","Data":"974bbbb412cef619b7ae370ba9bcd495dd86cf992b76f0af39e2d51d861dba84"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.542216 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"7dad7f0dd93fcfa7265f11fe69e573aaba7634cae7dc640cb99cc4829f2982ba"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.565333 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerStarted","Data":"73384dfe8a881d17e3e6bdb1913af86efff33dd52e3a2f6dedd23aa02e70e32d"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.618406 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" event={"ID":"378552fd-5e53-4882-87ff-95f3d9198861","Type":"ContainerStarted","Data":"38e89c2365d4719953c6077644989f5c6cd2d5cabf3bd51557ef06a00469791c"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.754308 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" event={"ID":"c085412c-b875-46c9-ae3e-e6b0d8067091","Type":"ContainerStarted","Data":"a34291094975e7b0261b1a628b474d2b747997e2394a81de745b418786e2d92b"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.756415 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.777436 4125 patch_prober.go:28] interesting pod/olm-operator-6d8474f75f-x54mh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.777773 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.819164 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" event={"ID":"b54e8941-2fc4-432a-9e51-39684df9089e","Type":"ContainerStarted","Data":"51a0ed51573e54783eaec0b562a7d00b746823f3e4730c5f84cee47fb9d258c7"}
Mar 12 13:35:59 crc kubenswrapper[4125]: I0312 13:35:59.870248 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" event={"ID":"0b5d722a-1123-4935-9740-52a08d018bc9","Type":"ContainerStarted","Data":"100c36f5259735a2e79225d6611a629f1dee36d06f332e30d1667d47502c59d5"}
Mar 12 13:36:01 crc kubenswrapper[4125]: I0312 13:36:01.008332 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" event={"ID":"72854c1e-5ae2-4ed6-9e50-ff3bccde2635","Type":"ContainerStarted","Data":"b602703906a3befe41f92e64002c3d0e9b2cc7371e6fc5748964f0b2db8dee94"}
Mar 12 13:36:01 crc kubenswrapper[4125]: I0312 13:36:01.025970 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" event={"ID":"cf1a8966-f594-490a-9fbb-eec5bafd13d3","Type":"ContainerStarted","Data":"8e4cb709eccdcdb55e8f565bc9bd7686bcb00ac0d78f9e8bc1112b93a08d582f"}
Mar 12 13:36:01 crc kubenswrapper[4125]: I0312 13:36:01.076706 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" event={"ID":"d0f40333-c860-4c04-8058-a0bf572dcf12","Type":"ContainerStarted","Data":"ae0472f3f0b97f4aa59dc09f1eadbf9fb1fbee6b180e4e3936996bcd53c4c1f4"}
Mar 12 13:36:01 crc kubenswrapper[4125]: I0312 13:36:01.092975 4125 patch_prober.go:28] interesting pod/olm-operator-6d8474f75f-x54mh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Mar 12 13:36:01 crc kubenswrapper[4125]: I0312 13:36:01.093289 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Mar 12 13:36:02 crc kubenswrapper[4125]: E0312 13:36:02.041205 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.076292 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rmwfn"]
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.109437 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"3cedb5d98335d2ae267db70a5778018a0c66fda8e21f3b773df4f274e4be2bf6"}
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.126673 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" event={"ID":"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf","Type":"ContainerStarted","Data":"bf3cc5d384c697ede552b2e7310ef4955e8ac25586b981a98c7f89ae2248f131"}
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.143899 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" event={"ID":"34a48baf-1bee-4921-8bb2-9b7320e76f79","Type":"ContainerStarted","Data":"021c54219acf5e17a7f3970f2d52f147d32aa375e4b93495a4aa7136026c2222"}
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.144687 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.175688 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" event={"ID":"297ab9b6-2186-4d5b-a952-2bfd59af63c4","Type":"ContainerStarted","Data":"e6dd34d6278b981f9eb5adad245f3a80081ceffb6fdce859a4adcf8b3bacb23d"}
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.175979 4125 patch_prober.go:28] interesting pod/olm-operator-6d8474f75f-x54mh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Mar 12 13:36:02 crc kubenswrapper[4125]: I0312 13:36:02.176029 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Mar 12 13:36:02 crc kubenswrapper[4125]: E0312 13:36:02.352155 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:36:02 crc kubenswrapper[4125]: E0312 13:36:02.352223 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:36:02 crc kubenswrapper[4125]: E0312 13:36:02.352324 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n59fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-k9qqb_openshift-marketplace(ccdf38cf-634a-41a2-9c8b-74bb86af80a7): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:02 crc kubenswrapper[4125]: E0312 13:36:02.352367 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:36:03 crc kubenswrapper[4125]: I0312 13:36:03.347604 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 12 13:36:03 crc kubenswrapper[4125]: I0312 13:36:03.366676 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerStarted","Data":"be67d2307407975fa80988e5323b61faf043ef1f8ce17b7f937137e83832bdf8"}
Mar 12 13:36:03 crc kubenswrapper[4125]: I0312 13:36:03.410380 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" event={"ID":"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab","Type":"ContainerStarted","Data":"8ee63d528c06cb57bbdb5fe1d5ec645d503f2feae7dc2c7c71a58187fb58a697"}
Mar 12 13:36:05 crc kubenswrapper[4125]: I0312 13:36:05.267662 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" event={"ID":"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be","Type":"ContainerStarted","Data":"f7262812fbe9cfe5b961780611037c2abd9e71e7abcbd6a149932e971356e019"}
Mar 12 13:36:05 crc kubenswrapper[4125]: I0312 13:36:05.335299 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" event={"ID":"45a8038e-e7f2-4d93-a6f5-7753aa54e63f","Type":"ContainerStarted","Data":"5a1035175a8a2e334788f78d96b87df4b5e204f98ee4041f61c1ae77841c3787"}
Mar 12 13:36:05 crc kubenswrapper[4125]: I0312 13:36:05.527413 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" event={"ID":"af6b67a3-a2bd-4051-9adc-c208a5a65d79","Type":"ContainerStarted","Data":"c13bd598fec680c7f0451a8d6f2749af905d284092b8f7ba74d7531994f015c4"}
Mar 12 13:36:05 crc kubenswrapper[4125]: I0312 13:36:05.899902 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" event={"ID":"6d67253e-2acd-4bc1-8185-793587da4f17","Type":"ContainerStarted","Data":"1a5d26a87e712f246168aa60b2f79a2d5f138d6d9b8dbead8f05b64920a42e30"}
Mar 12 13:36:07 crc kubenswrapper[4125]: E0312 13:36:07.312873 4125 kubelet.go:2517] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.281s"
Mar 12 13:36:07 crc kubenswrapper[4125]: I0312 13:36:07.711724 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:36:07 crc kubenswrapper[4125]: I0312 13:36:07.726916 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Mar 12 13:36:07 crc kubenswrapper[4125]: I0312 13:36:07.737576 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
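The probe entries above ("connect: connection refused", followed by status="ready" at 13:36:03 for olm-operator) are the normal startup window: the kubelet begins readiness probing as soon as the container starts, and the probe fails until the process binds its port and answers /healthz. A minimal illustrative Go probe target follows; the plain-HTTP listener and handler are assumptions for illustration (the operators in this log serve TLS on 8443):

    package main

    import (
    	"log"
    	"net/http"
    )

    func main() {
    	// Until ListenAndServe actually binds :8443, a kubelet readiness
    	// probe against this endpoint fails exactly like the entries
    	// above: "dial tcp ...:8443: connect: connection refused".
    	mux := http.NewServeMux()
    	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
    		w.WriteHeader(http.StatusOK)
    		w.Write([]byte("ok"))
    	})
    	log.Fatal(http.ListenAndServe(":8443", mux))
    }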
dial tcp 10.217.0.11:8443: connect: connection refused" Mar 12 13:36:08 crc kubenswrapper[4125]: I0312 13:36:08.935717 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755"} Mar 12 13:36:08 crc kubenswrapper[4125]: I0312 13:36:08.978204 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" event={"ID":"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7","Type":"ContainerStarted","Data":"690c3e41c88de001213b70a1c67447cbdf7b536c279ebd273a32e03268f91192"} Mar 12 13:36:08 crc kubenswrapper[4125]: I0312 13:36:08.991260 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"0dc3a60d6c19f83f4941e0943c503d20d8c798e055ddc9ef0aafc2e20d6210bd"} Mar 12 13:36:08 crc kubenswrapper[4125]: I0312 13:36:08.993490 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" event={"ID":"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0","Type":"ContainerStarted","Data":"ac8b98249189ee263d41d786ed4e7e18894aa76619d18d1d278f61804f527629"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:08.994481 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" event={"ID":"13ad7555-5f28-4555-a563-892713a8433a","Type":"ContainerStarted","Data":"ae07e756a7d12ed057ca356f074f3bef9b7c944c57f3e10700d1db5545a7d68d"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.022665 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" event={"ID":"87df87f4-ba66-4137-8e41-1fa632ad4207","Type":"ContainerStarted","Data":"d943ace82fc3b0b87ffb23bd3be803a83c10d920dfaf0f26664415e34667cff8"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.024210 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.037894 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" event={"ID":"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e","Type":"ContainerStarted","Data":"9352ce646adc0ab0c2ea7437c967dcebf4f8a1d8d5617ae7b2529767b9a18575"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.038908 4125 patch_prober.go:28] interesting pod/controller-manager-6ff78978b4-q4vv8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body= Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.038967 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.090294 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" event={"ID":"8a5ae51d-d173-4531-8975-f164c975ce1f","Type":"ContainerStarted","Data":"d089f95acb3b75368fa7551fb5a3fc8751a81fd738cde91997c5099bbcec8928"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.092039 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.092182 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.264298 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerStarted","Data":"fad9cea35e0ebc15a97c9f69995245b3677742d9f62c2632e4840da1eef2d0a3"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.461748 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gbw49" event={"ID":"13045510-8717-4a71-ade4-be95a76440a7","Type":"ContainerStarted","Data":"1b316d1c6f59bf3deb94cd5573703228872711b59a786ffe83f2453ded5a18c2"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.532085 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" event={"ID":"4f8aa612-9da0-4a2b-911e-6a1764a4e74e","Type":"ContainerStarted","Data":"524577e5fb5f110bdb01c76b377cd9a7acc3d1012c4709c78601ed36f15893d8"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.546989 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"30ee690669351e013b4fdb1b6bfdce6328cce7aa1ae84a151f545ae41ee81fed"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.560113 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"bba8c50c3b78b250784a85e4f14d549a065abd7c3cc07b35c99319c6d3a15245"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.562979 4125 generic.go:334] "Generic (PLEG): container finished" podID="5bacb25d-97b6-4491-8fb4-99feae1d802a" containerID="4f8b33b93bacb2c4cce1b6323c23687fc8b36d218a098863741aca30913a9bec" exitCode=0 Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.563043 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" event={"ID":"5bacb25d-97b6-4491-8fb4-99feae1d802a","Type":"ContainerDied","Data":"4f8b33b93bacb2c4cce1b6323c23687fc8b36d218a098863741aca30913a9bec"} Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.567523 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" 
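The readiness failures recorded above (catalog-operator, controller-manager) all fail the same way: the kubelet dials the pod IP and the connection is refused because the server inside the container has not bound its port yet. A simplified stand-in for such an HTTP check, shown only as an illustration and not as the kubelet's actual prober implementation (the real prober.go also applies the probe timeout, headers, and redirect policy from the container spec):

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    // probe is a simplified stand-in for one kubelet HTTP readiness check:
    // GET the endpoint and treat any transport error or non-2xx/3xx status
    // as a failure.
    func probe(url string) error {
        client := &http.Client{
            Timeout: time.Second,
            // Self-signed serving certs are the norm for these operator
            // endpoints, so this sketch skips certificate verification.
            Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
        }
        resp, err := client.Get(url)
        if err != nil {
            // Until the container binds its port, this is exactly the
            // "dial tcp ...: connect: connection refused" seen above.
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("probe failed: status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        fmt.Println(probe("https://10.217.0.11:8443/healthz"))
    }

Run against https://10.217.0.11:8443/healthz while catalog-operator is still starting, it returns the same "connect: connection refused" error string that prober.go logs above; once the server is up, the probe passes and the pod is marked ready.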
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.567523 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"94803870c39c663aac3c4df56ed06883072a310c09c31b80ac8c3f4c99915832"}
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.570621 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.585430 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.585514 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.611538 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" event={"ID":"0b5d722a-1123-4935-9740-52a08d018bc9","Type":"ContainerStarted","Data":"d6c6c85aaea7103c9db4141135520eac51859f2e04811c274102083578dbaefa"}
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.635073 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"c47ce1b61b78f947bbf881c4500564865b677f7ac60916f2651215a08d905da4"}
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.885493 4125 generic.go:334] "Generic (PLEG): container finished" podID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerID="1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4" exitCode=0
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.885631 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerDied","Data":"1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4"}
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.934509 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" event={"ID":"0f394926-bdb9-425c-b36e-264d7fd34550","Type":"ContainerStarted","Data":"5806024690eae8f3f8c98ac80e4e73766fd80a88dd30ae7e0af35a15980bca6d"}
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.963205 4125 generic.go:334] "Generic (PLEG): container finished" podID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerID="c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93" exitCode=0
Mar 12 13:36:09 crc kubenswrapper[4125]: I0312 13:36:09.963298 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerDied","Data":"c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93"}
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.541901 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.543009 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.543998 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n6sqt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-8jhz6_openshift-marketplace(3f4dca86-e6ee-4ec9-8324-86aff960225e): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.544279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.613444 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.613898 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.614272 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-ncrf5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-7287f_openshift-marketplace(887d596e-c519-4bfa-af90-3edd9e1b2f0f): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:10 crc kubenswrapper[4125]: E0312 13:36:10.614438 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.151454 4125 generic.go:334] "Generic (PLEG): container finished" podID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerID="1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755" exitCode=0
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.151777 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerDied","Data":"1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755"}
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.261319 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84fccc7b6-mkncc" event={"ID":"b233d916-bfe3-4ae5-ae39-6b574d1aa05e","Type":"ContainerStarted","Data":"9c5d3925adb66fb4aa984ebb038669e1253dd1ae4d86122b5b7a97e70cd77667"}
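At this point every openshift-marketplace catalog pod is failing to pull its index image from registry.redhat.io for the same reason: no valid Customer Portal credentials, as the registry's own error text says (see https://access.redhat.com/RegistryAuthentication). When triaging a log like this, a small scan that deduplicates the failing images is handy. A rough sketch, assuming entries keep the exact format shown above (a hypothetical helper, not part of kubelet):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "strings"
    )

    // imageRe extracts the image="..." field from a kubelet log entry.
    var imageRe = regexp.MustCompile(`image="([^"]+)"`)

    func main() {
        failed := map[string]bool{}
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // entries can be very long
        for sc.Scan() {
            line := sc.Text()
            if !strings.Contains(line, `"Failed to pull image"`) {
                continue
            }
            if m := imageRe.FindStringSubmatch(line); m != nil {
                failed[m[1]] = true
            }
        }
        for img := range failed {
            fmt.Println(img)
        }
    }

Piped this log on stdin, it would print the four distinct registry.redhat.io/redhat/*-index:v4.16 images seen in the ErrImagePull entries, which is usually enough to tell that one missing pull secret explains the whole burst.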
Mar 12 13:36:11 crc kubenswrapper[4125]: E0312 13:36:11.745308 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16"
Mar 12 13:36:11 crc kubenswrapper[4125]: E0312 13:36:11.745635 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16"
Mar 12 13:36:11 crc kubenswrapper[4125]: E0312 13:36:11.745786 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-tf29r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-8s8pc_openshift-marketplace(c782cf62-a827-4677-b3c2-6f82c5f09cbb): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:11 crc kubenswrapper[4125]: E0312 13:36:11.745913 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.901501 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" event={"ID":"59748b9b-c309-4712-aa85-bb38d71c4915","Type":"ContainerStarted","Data":"dc9b37dcf2971d52dc9cd43eaa6338b59da94d68db04193f258ecf9da45f2b5f"}
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.902462 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.946318 4125 patch_prober.go:28] interesting pod/console-conversion-webhook-595f9969b-l6z49 container/conversion-webhook-server namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused" start-of-body=
Mar 12 13:36:11 crc kubenswrapper[4125]: I0312 13:36:11.946440 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" containerName="conversion-webhook-server" probeResult="failure" output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused"
Mar 12 13:36:12 crc kubenswrapper[4125]: E0312 13:36:12.100628 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.183430 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" event={"ID":"a702c6d2-4dde-4077-ab8c-0f8df804bf7a","Type":"ContainerStarted","Data":"5ff02b7d410db9a2b94fc5cd0a58239b56f3904ef7af7c900e0f575f868351f4"}
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.234175 4125 patch_prober.go:28] interesting pod/controller-manager-6ff78978b4-q4vv8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.234533 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused"
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.234968 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.235043 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.236535 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:36:12 crc kubenswrapper[4125]: I0312 13:36:12.236614 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:36:12 crc kubenswrapper[4125]: E0312 13:36:12.238285 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:36:12 crc kubenswrapper[4125]: E0312 13:36:12.250912 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.086353 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.086909 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.086908 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.086985 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.125563 4125 patch_prober.go:28] interesting pod/console-conversion-webhook-595f9969b-l6z49 container/conversion-webhook-server namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.125573 4125 patch_prober.go:28] interesting pod/console-conversion-webhook-595f9969b-l6z49 container/conversion-webhook-server namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.136366 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" containerName="conversion-webhook-server" probeResult="failure" output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.136468 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" containerName="conversion-webhook-server" probeResult="failure" output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.213705 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.213781 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.214240 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.214266 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.258637 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" event={"ID":"297ab9b6-2186-4d5b-a952-2bfd59af63c4","Type":"ContainerStarted","Data":"c9c833dba164facead621d87eb687a63bef18f8b60ee000255c9f5e177321e2f"}
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.278066 4125 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="bba8c50c3b78b250784a85e4f14d549a065abd7c3cc07b35c99319c6d3a15245" exitCode=0
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.278248 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"bba8c50c3b78b250784a85e4f14d549a065abd7c3cc07b35c99319c6d3a15245"}
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.292249 4125 patch_prober.go:28] interesting pod/controller-manager-6ff78978b4-q4vv8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.292358 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.527314 4125 generic.go:334] "Generic (PLEG): container finished" podID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerID="040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6" exitCode=0
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.527475 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerDied","Data":"040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6"}
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.534617 4125 scope.go:117] "RemoveContainer" containerID="fc1ea6d2d261fe4d6894e8ce5ccb2079349bd81b4208e7b03a1e76d6ba40411a"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.570499 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" event={"ID":"bd556935-a077-45df-ba3f-d42c39326ccd","Type":"ContainerStarted","Data":"91ca68fd625eb7a69575ab3e53c5f5c2b85f637552935a79ebc79d702e1b866a"}
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.574869 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.605063 4125 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.606520 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.648487 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" event={"ID":"ed024e5d-8fc2-4c22-803d-73f3c9795f19","Type":"ContainerStarted","Data":"81d2790dd791bb892de21e378fb0d17b01aa692a849c2c791acc6a67a01144f8"}
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.737406 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" event={"ID":"cf1a8966-f594-490a-9fbb-eec5bafd13d3","Type":"ContainerStarted","Data":"b4bbdd8bae52afd539ea667b39786eddd287b9b9422320618782f8ce9a431ea7"}
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.759330 4125 patch_prober.go:28] interesting pod/console-conversion-webhook-595f9969b-l6z49 container/conversion-webhook-server namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.759422 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" containerName="conversion-webhook-server" probeResult="failure" output="Get \"https://10.217.0.61:9443/readyz\": dial tcp 10.217.0.61:9443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.760100 4125 patch_prober.go:28] interesting pod/controller-manager-6ff78978b4-q4vv8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.760185 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused"
Mar 12 13:36:13 crc kubenswrapper[4125]: E0312 13:36:13.769732 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.865115 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.865244 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-84fccc7b6-mkncc"
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.897765 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:36:13 crc kubenswrapper[4125]: I0312 13:36:13.898054 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:36:14 crc kubenswrapper[4125]: E0312 13:36:14.479042 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.801424 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" event={"ID":"0b5c38ff-1fa8-4219-994d-15776acd4a4d","Type":"ContainerStarted","Data":"01904e9caa36dbc7772b537a148f3270c1b6a855aab806556aac5544f9540dc2"}
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.819933 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"03986766a13ed650c14fd79f9e5b20f3fe8b23aa47c6521ddf17adfb8b570506"}
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.822117 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.833623 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.833720 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.841271 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"b3bd15ca3c2b2de4f5f86083f9cc7853e44fe768a4fe62884571627349bc175a"}
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.860000 4125 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body=
Mar 12 13:36:14 crc kubenswrapper[4125]: I0312 13:36:14.860097 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused"
Mar 12 13:36:15 crc kubenswrapper[4125]: I0312 13:36:15.321217 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt"
Mar 12 13:36:15 crc kubenswrapper[4125]: I0312 13:36:15.957095 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" event={"ID":"5bacb25d-97b6-4491-8fb4-99feae1d802a","Type":"ContainerStarted","Data":"e468184ee53f9f73d05b987f44003384a1b4a39f4795c0f8360bb94b0b3340b8"}
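The event={...} payload in the "SyncLoop (PLEG)" entries appears to be JSON-compatible as logged, so pod lifecycle events can be decoded rather than grepped when summarizing a startup sequence like this. A minimal sketch of a hypothetical decoder (the regex and field names are assumptions based only on the format visible in this log, not on kubelet internals):

    package main

    import (
        "bufio"
        "encoding/json"
        "fmt"
        "os"
        "regexp"
    )

    // PLEGEvent mirrors the three fields visible in the event={...} payload.
    type PLEGEvent struct {
        ID   string // pod UID
        Type string // ContainerStarted, ContainerDied, ...
        Data string // container or sandbox ID
    }

    var eventRe = regexp.MustCompile(`event=(\{[^}]*\})`)

    func main() {
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // entries can be very long
        for sc.Scan() {
            m := eventRe.FindStringSubmatch(sc.Text())
            if m == nil {
                continue
            }
            var ev PLEGEvent
            if err := json.Unmarshal([]byte(m[1]), &ev); err == nil {
                fmt.Printf("%s %s %s\n", ev.Type, ev.ID, ev.Data)
            }
        }
    }

Fed the entries above, it would emit one "ContainerStarted"/"ContainerDied" line per event, keyed by pod UID, which makes the start/die/restart churn of the marketplace pods much easier to follow.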
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.023348 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" event={"ID":"10603adc-d495-423c-9459-4caa405960bb","Type":"ContainerStarted","Data":"ec1944b230168f6384a330628d7e1721839debe0c5aa3bb662a142993154bc06"}
Mar 12 13:36:16 crc kubenswrapper[4125]: E0312 13:36:16.045531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.078156 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" event={"ID":"43ae1c37-047b-4ee2-9fee-41e337dd4ac8","Type":"ContainerStarted","Data":"2da14cea2e9328cb16f7e4d671c9f21a6d2615667035cc26a4e4d0f634f80b82"}
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.105316 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" event={"ID":"72854c1e-5ae2-4ed6-9e50-ff3bccde2635","Type":"ContainerStarted","Data":"dfb750c7e4e18e642909a1627c6dc16f8f110b55e0d2e448a29130593acb7d17"}
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.159448 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gbw49" event={"ID":"13045510-8717-4a71-ade4-be95a76440a7","Type":"ContainerStarted","Data":"bd89718d796f2e6ecece11b274626427043fcce77089c7ad9a475afaf4a9400d"}
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.159587 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.206468 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.225701 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"5c274f02173d251858e04c2e1bef340f340e73ad687e5608c9f7f08857824fa2"}
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.250261 4125 generic.go:334] "Generic (PLEG): container finished" podID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerID="5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29" exitCode=0
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.257469 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerDied","Data":"5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29"}
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.262107 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.262252 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:16 crc kubenswrapper[4125]: E0312 13:36:16.445349 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16"
Mar 12 13:36:16 crc kubenswrapper[4125]: E0312 13:36:16.445465 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16"
Mar 12 13:36:16 crc kubenswrapper[4125]: E0312 13:36:16.445577 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-ptdrb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-f4jkp_openshift-marketplace(4092a9f8-5acc-4932-9e90-ef962eeb301a): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:16 crc kubenswrapper[4125]: E0312 13:36:16.445627 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:36:16 crc kubenswrapper[4125]: I0312 13:36:16.917429 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-8-crc" podStartSLOduration=53827322.917336315 podStartE2EDuration="14952h2m2.917336317s" podCreationTimestamp="2024-06-27 13:34:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:36:16.708484125 +0000 UTC m=+947.031870224" watchObservedRunningTime="2026-03-12 13:36:16.917336317 +0000 UTC m=+947.240722386"
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.115211 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-gbw49"
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.310409 4125 generic.go:334] "Generic (PLEG): container finished" podID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerID="606590d421466d5fb63038809a2ecba9accc142178b68d087c9ed02dfcf80ca8" exitCode=0
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.310627 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" event={"ID":"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab","Type":"ContainerDied","Data":"606590d421466d5fb63038809a2ecba9accc142178b68d087c9ed02dfcf80ca8"}
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.345775 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" event={"ID":"af6b67a3-a2bd-4051-9adc-c208a5a65d79","Type":"ContainerStarted","Data":"5e741ea24a748382b3c92c15feb2fb65a5064974af9975b49afd8532fa0ccef2"}
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.346431 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.352290 4125 patch_prober.go:28] interesting pod/route-controller-manager-5c4dbb8899-tchz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.352420 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.367061 4125 generic.go:334] "Generic (PLEG): container finished" podID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerID="d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594" exitCode=0
Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.367938 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerDied","Data":"d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594"}
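The pod_startup_latency_tracker entry above reports podStartSLOduration=53827322.917336315 seconds and podStartE2EDuration="14952h2m2.917336317s", which looks absurd but is internally consistent: the pod's creationTimestamp is 2024-06-27 while the node clock reads 2026-03-12, so the tracker measures roughly 623 days. In a CRC cluster the static manifests date from when the disk image was built, so the gap presumably reflects the image's age rather than a real startup latency. The arithmetic checks out, using the timestamps copied from that entry:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // creationTimestamp and watchObservedRunningTime from the log entry.
        created, _ := time.Parse(time.RFC3339, "2024-06-27T13:34:14Z")
        observed, _ := time.Parse(time.RFC3339, "2026-03-12T13:36:16.917336317Z")
        d := observed.Sub(created)
        fmt.Println(d)           // 14952h2m2.917336317s == podStartE2EDuration
        fmt.Println(d.Seconds()) // 5.3827322917336315e+07 == podStartSLOduration
    }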
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" event={"ID":"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be","Type":"ContainerStarted","Data":"88041f1ab1a477c1d60996f5e063a66feee3b9101053ab276026aa59d8ddba88"} Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.429335 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" event={"ID":"45a8038e-e7f2-4d93-a6f5-7753aa54e63f","Type":"ContainerStarted","Data":"9e4c3b4d22fca7e9ce4866f1e217758bde1b5d74c62a8896e7c1f725f4a8f102"} Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.434101 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:17 crc kubenswrapper[4125]: I0312 13:36:17.434219 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:36:17 crc kubenswrapper[4125]: E0312 13:36:17.526230 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:36:17 crc kubenswrapper[4125]: E0312 13:36:17.589655 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16" Mar 12 13:36:17 crc kubenswrapper[4125]: E0312 13:36:17.589712 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16" Mar 12 13:36:17 crc kubenswrapper[4125]: E0312 13:36:17.589899 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-r7dbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rmwfn_openshift-marketplace(9ad279b4-d9dc-42a8-a1c8-a002bd063482): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:17 crc kubenswrapper[4125]: E0312 13:36:17.589962 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.223942 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.224263 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.236040 4125 patch_prober.go:28] interesting pod/apiserver-69c565c9b6-vbdpd container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body= Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.236115 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.557479 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" event={"ID":"4f8aa612-9da0-4a2b-911e-6a1764a4e74e","Type":"ContainerStarted","Data":"3883a7c29e487e535902196ea55d448cf3bae7d63b8f81b4cf5b218fc02ec637"} Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.610080 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"6227ee4a8496f1cb024665458d33453ee5216601cd460a108dc527e674a4a58b"} Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.627560 4125 patch_prober.go:28] interesting pod/route-controller-manager-5c4dbb8899-tchz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.627666 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Mar 12 13:36:18 crc kubenswrapper[4125]: E0312 13:36:18.648370 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.885713 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.885775 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 
Mar 12 13:36:18 crc kubenswrapper[4125]: I0312 13:36:18.886079 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.638705 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"2ffb0a3981b1b9b39719f16bcb32d26d367866fdfabc973591418145dc7f0972"}
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.640311 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.669323 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"77cd02d9e8e6f6e26d14b16837b71c69ee149f0b4585c09df5a8e7ac01f44293"}
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.673785 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"a9aa62f93d285cdc767ac1378d09514acb8cee0f731113403a10ced1fcc588b2"}
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.676087 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.690052 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.690222 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.692101 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" event={"ID":"a702c6d2-4dde-4077-ab8c-0f8df804bf7a","Type":"ContainerStarted","Data":"42719075e3e2b72f130391c1d2087f7edcd3e22a2b5b3144feddfb3b65b9391c"}
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.719693 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" event={"ID":"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0","Type":"ContainerStarted","Data":"7919af985e95fedd860aa05206dd9947038b335a008e4262ac820ca54f8ce451"}
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.751290 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" event={"ID":"6d67253e-2acd-4bc1-8185-793587da4f17","Type":"ContainerStarted","Data":"beeb9bf44af1c988b8255931520dd89196509fb76414f15d8ad2cb327fe6aeb4"}
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.925893 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:19 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:19 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:19 crc kubenswrapper[4125]: I0312 13:36:19.926013 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:20 crc kubenswrapper[4125]: I0312 13:36:20.794772 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" event={"ID":"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab","Type":"ContainerStarted","Data":"e626fe31e9fc0d6f2e34cae2f75ebd1df96daffc967397efe4465fb73926e0dd"}
Mar 12 13:36:20 crc kubenswrapper[4125]: I0312 13:36:20.797168 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:20 crc kubenswrapper[4125]: I0312 13:36:20.797221 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:20 crc kubenswrapper[4125]: I0312 13:36:20.894078 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:20 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:20 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:20 crc kubenswrapper[4125]: I0312 13:36:20.894262 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:21 crc kubenswrapper[4125]: I0312 13:36:21.808373 4125 generic.go:334] "Generic (PLEG): container finished" podID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" containerID="dfb750c7e4e18e642909a1627c6dc16f8f110b55e0d2e448a29130593acb7d17" exitCode=0
Mar 12 13:36:21 crc kubenswrapper[4125]: I0312 13:36:21.808515 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" event={"ID":"72854c1e-5ae2-4ed6-9e50-ff3bccde2635","Type":"ContainerDied","Data":"dfb750c7e4e18e642909a1627c6dc16f8f110b55e0d2e448a29130593acb7d17"}
Mar 12 13:36:21 crc kubenswrapper[4125]: I0312 13:36:21.813178 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:21 crc kubenswrapper[4125]: I0312 13:36:21.813321 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:21 crc kubenswrapper[4125]: I0312 13:36:21.891721 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:21 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:21 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:21 crc kubenswrapper[4125]: I0312 13:36:21.891946 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.819297 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" event={"ID":"13ad7555-5f28-4555-a563-892713a8433a","Type":"ContainerStarted","Data":"586e67cdf44721522bdaa46be7afee74d9457d02a4b95231987aca3f6a4df542"}
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.822085 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.824869 4125 patch_prober.go:28] interesting pod/oauth-openshift-765b47f944-n2lhl container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused" start-of-body=
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.824964 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused"
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.826774 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"73d5f87237171c13fc808fb989bf36334a871dd6906312f107da49184c37cb76"}
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.831354 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" event={"ID":"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab","Type":"ContainerStarted","Data":"9abe5af36bb650e822fc8617d763cb1ac72e09098227d9187b59e949bcd51a26"}
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.847230 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt"
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.904057 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:22 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:22 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:22 crc kubenswrapper[4125]: I0312 13:36:22.904232 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.086801 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.087566 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.088278 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.088415 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.174388 4125 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body=
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.174527 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.175891 4125 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body=
connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.176307 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.179371 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.179442 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.179541 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.179572 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.278718 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.279306 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.278891 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.279777 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 
13:36:23.343704 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.703113 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.703741 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.706510 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-mtx25 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.706804 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.713749 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.714455 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.714737 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.715030 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.783426 4125 patch_prober.go:28] interesting pod/oauth-openshift-765b47f944-n2lhl container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused" start-of-body= Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.783516 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused" Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.841040 4125 patch_prober.go:28] interesting pod/oauth-openshift-765b47f944-n2lhl 
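Note: the prober.go entries above all follow one pattern: a single HTTP GET with a short timeout, where a transport error ("connection refused", a client timeout) or a bad status code is recorded as a probe failure. A minimal sketch of that check follows; it mirrors the behavior visible in the log, not the kubelet's actual prober implementation, and the 1s timeout is an assumption.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one kubelet-style HTTP probe: a GET with a hard
// timeout, success for 2xx/3xx, failure for anything else.
func probeOnce(url string) (ok bool, detail string) {
	client := &http.Client{Timeout: 1 * time.Second} // assumed timeoutSeconds: 1
	resp, err := client.Get(url)
	if err != nil {
		// Dial errors surface in the log as "connect: connection refused".
		return false, err.Error()
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return true, resp.Status
	}
	// Non-2xx/3xx matches "HTTP probe failed with statuscode: 500" above.
	return false, fmt.Sprintf("HTTP probe failed with statuscode: %d", resp.StatusCode)
}

func main() {
	// The router's startup probe endpoint from the entries above.
	ok, detail := probeOnce("http://localhost:1936/healthz/ready")
	fmt.Println(ok, detail)
}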
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.841102 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.848376 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.848523 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.891291 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:23 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:23 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.891379 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:23 crc kubenswrapper[4125]: I0312 13:36:23.923610 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.061436 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"
Mar 12 13:36:24 crc kubenswrapper[4125]: E0312 13:36:24.149011 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:36:24 crc kubenswrapper[4125]: E0312 13:36:24.149297 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16"
Mar 12 13:36:24 crc kubenswrapper[4125]: E0312 13:36:24.149416 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n6sqt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-8jhz6_openshift-marketplace(3f4dca86-e6ee-4ec9-8324-86aff960225e): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:24 crc kubenswrapper[4125]: E0312 13:36:24.149482 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.213530 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.213854 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.217433 4125 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.217604 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.850174 4125 patch_prober.go:28] interesting pod/oauth-openshift-765b47f944-n2lhl container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused" start-of-body=
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.850384 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused"
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.888202 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:24 crc kubenswrapper[4125]: I0312 13:36:24.888414 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:25 crc kubenswrapper[4125]: I0312 13:36:25.892162 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:25 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:25 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:25 crc kubenswrapper[4125]: I0312 13:36:25.892513 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:26 crc kubenswrapper[4125]: E0312 13:36:26.028629 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:36:26 crc kubenswrapper[4125]: E0312 13:36:26.029550 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.174481 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.174562 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.174610 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.174737 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.367555 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.523628 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.898266 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:26 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:26 crc kubenswrapper[4125]: healthz check failed
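Note: the ImagePullBackOff records above are the back-off phase between pull retries, during which the pod worker refuses to re-pull until the delay expires. A small sketch of the schedule follows; the 10s initial delay and 5m cap match the documented kubelet defaults and are assumed here rather than read from this log.

package main

import (
	"fmt"
	"time"
)

func main() {
	// The back-off delay doubles after each failed pull and is capped.
	delay := 10 * time.Second    // assumed initial image pull back-off
	maxDelay := 5 * time.Minute  // assumed cap
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: Back-off pulling image, next retry in %s\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}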
Mar 12 13:36:26 crc kubenswrapper[4125]: I0312 13:36:26.898662 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.180336 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16"
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.180761 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16"
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.181043 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-tf29r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-8s8pc_openshift-marketplace(c782cf62-a827-4677-b3c2-6f82c5f09cbb): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.181103 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.180496 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.181185 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.181245 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-ncrf5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-7287f_openshift-marketplace(887d596e-c519-4bfa-af90-3edd9e1b2f0f): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:27 crc kubenswrapper[4125]: E0312 13:36:27.182434 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:36:27 crc kubenswrapper[4125]: I0312 13:36:27.893767 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:27 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:27 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:27 crc kubenswrapper[4125]: I0312 13:36:27.893909 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:27 crc kubenswrapper[4125]: I0312 13:36:27.919399 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"7e43f2afa9d4a0f0bd36c82d730f4fe1cddb0357bad163f87d7edb6e926f1d1e"}
Mar 12 13:36:28 crc kubenswrapper[4125]: I0312 13:36:28.715462 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-mtx25 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body=
Mar 12 13:36:28 crc kubenswrapper[4125]: I0312 13:36:28.715926 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused"
Mar 12 13:36:28 crc kubenswrapper[4125]: I0312 13:36:28.895371 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:28 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:28 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:28 crc kubenswrapper[4125]: I0312 13:36:28.895896 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:29 crc kubenswrapper[4125]: E0312 13:36:29.045314 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:36:29 crc kubenswrapper[4125]: E0312 13:36:29.167506 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16"
Mar 12 13:36:29 crc kubenswrapper[4125]: E0312 13:36:29.167865 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16"
Mar 12 13:36:29 crc kubenswrapper[4125]: E0312 13:36:29.168077 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-ptdrb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-f4jkp_openshift-marketplace(4092a9f8-5acc-4932-9e90-ef962eeb301a): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:29 crc kubenswrapper[4125]: E0312 13:36:29.168128 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.176791 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.177017 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.177060 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.177113 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.177152 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.178427 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"2ffb0a3981b1b9b39719f16bcb32d26d367866fdfabc973591418145dc7f0972"} pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted"
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.178535 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" containerID="cri-o://2ffb0a3981b1b9b39719f16bcb32d26d367866fdfabc973591418145dc7f0972" gracePeriod=30
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.179228 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.179324 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.899576 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:29 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:29 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:29 crc kubenswrapper[4125]: I0312 13:36:29.900069 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.540289 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.622513 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") pod \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") "
Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.622641 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kubelet-dir\") pod \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\" (UID: \"72854c1e-5ae2-4ed6-9e50-ff3bccde2635\") "
Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.623027 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "72854c1e-5ae2-4ed6-9e50-ff3bccde2635" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.630935 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "72854c1e-5ae2-4ed6-9e50-ff3bccde2635" (UID: "72854c1e-5ae2-4ed6-9e50-ff3bccde2635"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
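Note: the openshift-config-operator sequence above shows a liveness failure turning into a restart: the probe is marked unhealthy, then the container is killed with gracePeriod=30. A small sketch of the consecutive-failure counting that gates this decision follows; the failureThreshold of 3 is the Kubernetes default, assumed here since the pod spec is not visible in this log.

package main

import "fmt"

// probeWorker tracks consecutive liveness failures, resetting on success,
// the way probe results gate a container restart.
type probeWorker struct {
	failures  int
	threshold int
}

// observe records one probe result and reports whether the failure
// threshold has been reached.
func (w *probeWorker) observe(success bool) (restart bool) {
	if success {
		w.failures = 0
		return false
	}
	w.failures++
	return w.failures >= w.threshold
}

func main() {
	w := &probeWorker{threshold: 3} // assumed failureThreshold: 3
	for i, ok := range []bool{false, false, false} {
		if w.observe(ok) {
			// Mirrors "failed liveness probe, will be restarted" above; the
			// kill then uses the pod's termination grace period (30s here).
			fmt.Printf("probe %d: failed liveness probe, will be restarted (gracePeriod=30)\n", i+1)
		}
	}
}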
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.724729 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.725159 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72854c1e-5ae2-4ed6-9e50-ff3bccde2635-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.896957 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:30 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:30 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.897197 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.946638 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.946753 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-8-crc" event={"ID":"72854c1e-5ae2-4ed6-9e50-ff3bccde2635","Type":"ContainerDied","Data":"b602703906a3befe41f92e64002c3d0e9b2cc7371e6fc5748964f0b2db8dee94"} Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.946788 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b602703906a3befe41f92e64002c3d0e9b2cc7371e6fc5748964f0b2db8dee94" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.951079 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" event={"ID":"10603adc-d495-423c-9459-4caa405960bb","Type":"ContainerStarted","Data":"e746be19665ca36701883ea116b6c3a7aac4a5a4cdf83c8186cb5460d41727e8"} Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.957082 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/0.log" Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.957725 4125 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="2ffb0a3981b1b9b39719f16bcb32d26d367866fdfabc973591418145dc7f0972" exitCode=2 Mar 12 13:36:30 crc kubenswrapper[4125]: I0312 13:36:30.957869 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"2ffb0a3981b1b9b39719f16bcb32d26d367866fdfabc973591418145dc7f0972"} Mar 12 13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.430037 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 
13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.430378 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.430521 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.430578 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.430669 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.887993 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:31 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:31 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:31 crc kubenswrapper[4125]: I0312 13:36:31.888094 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:32 crc kubenswrapper[4125]: I0312 13:36:32.180263 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:36:32 crc kubenswrapper[4125]: I0312 13:36:32.180693 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:36:32 crc kubenswrapper[4125]: I0312 13:36:32.889572 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:32 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:32 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:32 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:32 crc kubenswrapper[4125]: I0312 13:36:32.889685 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.278230 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:33 crc 
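[Annotation] The repeated router-default and operator entries above are kubelet HTTP probes failing: the kubelet issues a GET against the container's health endpoint and logs the status plus the start of the response body. A kubelet-style HTTP probe counts any status in [200, 400) as success and everything else, including transport errors such as "connect: connection refused", as failure. A minimal standard-library sketch of that check follows; the function name, URL, and timeout are illustrative, not taken from the kubelet source.

// probe_sketch.go - illustrative only; not the kubelet's actual prober code.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// httpProbe mimics a kubelet-style HTTP GET probe: any status in
// [200, 400) counts as success; anything else, or a transport error
// (e.g. "connect: connection refused"), counts as failure.
func httpProbe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe transport error: %w", err)
	}
	defer resp.Body.Close()
	// The kubelet logs only a bounded prefix of the body ("start-of-body").
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d, start-of-body=%s", resp.StatusCode, body)
	}
	return nil
}

func main() {
	if err := httpProbe("http://10.217.0.13:8080/healthz", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}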
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.278351 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.278251 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.278752 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.311993 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.337803 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.714531 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.714627 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.717498 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.717590 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.782615 4125 patch_prober.go:28] interesting pod/oauth-openshift-765b47f944-n2lhl container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused" start-of-body=
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.782702 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.30:6443/healthz\": dial tcp 10.217.0.30:6443: connect: connection refused"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.847966 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.848095 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.897119 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:33 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:33 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.897252 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.939997 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.994469 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/0.log"
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.995672 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"78c792e326186d5224a3a883603f4948e9553db7e6abf9de8146d154ac958b88"}
Mar 12 13:36:33 crc kubenswrapper[4125]: I0312 13:36:33.997722 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:36:34 crc kubenswrapper[4125]: E0312 13:36:34.154516 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16"
Mar 12 13:36:34 crc kubenswrapper[4125]: E0312 13:36:34.154913 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16"
Mar 12 13:36:34 crc kubenswrapper[4125]: E0312 13:36:34.155105 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-r7dbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rmwfn_openshift-marketplace(9ad279b4-d9dc-42a8-a1c8-a002bd063482): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:34 crc kubenswrapper[4125]: E0312 13:36:34.155196 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
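[Annotation] The ErrImagePull above is an authentication failure against registry.redhat.io. Pull credentials for a private registry are normally delivered to the node as a kubernetes.io/dockerconfigjson pull secret, whose "auth" field is base64("username:password"). A sketch of producing that JSON in Go; the type names, username, and password are placeholders, not values from this cluster.

// pullsecret_sketch.go - illustrative; credentials are placeholders.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// authEntry and dockerConfigJSON mirror the layout of a
// kubernetes.io/dockerconfigjson pull secret: registry host -> auth entry.
type authEntry struct {
	Auth string `json:"auth"`
}

type dockerConfigJSON struct {
	Auths map[string]authEntry `json:"auths"`
}

func main() {
	user, pass := "portal-user", "portal-password" // placeholders
	cfg := dockerConfigJSON{Auths: map[string]authEntry{
		"registry.redhat.io": {Auth: base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))},
	}}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	// This JSON is what a dockerconfigjson secret's .dockerconfigjson key holds.
	fmt.Println(string(out))
}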
statuscode: 500" Mar 12 13:36:36 crc kubenswrapper[4125]: I0312 13:36:36.022976 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"d32dc5bab0f48744b3c1834684c713d92b2b36b656010b75458d2d86244529d4"} Mar 12 13:36:36 crc kubenswrapper[4125]: I0312 13:36:36.023083 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:36:36 crc kubenswrapper[4125]: I0312 13:36:36.023122 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:36:36 crc kubenswrapper[4125]: I0312 13:36:36.901540 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:36 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:36 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:36 crc kubenswrapper[4125]: I0312 13:36:36.901934 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:37 crc kubenswrapper[4125]: E0312 13:36:37.141105 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16" Mar 12 13:36:37 crc kubenswrapper[4125]: E0312 13:36:37.141486 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16" Mar 12 13:36:37 crc kubenswrapper[4125]: E0312 13:36:37.141662 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-mwzcr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-g4v97_openshift-marketplace(bb917686-edfb-4158-86ad-6fce0abec64c): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:37 crc kubenswrapper[4125]: E0312 13:36:37.141721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Mar 12 13:36:37 crc kubenswrapper[4125]: E0312 13:36:37.141721 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:36:37 crc kubenswrapper[4125]: I0312 13:36:37.888986 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:37 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:37 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:37 crc kubenswrapper[4125]: I0312 13:36:37.889326 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:38 crc kubenswrapper[4125]: I0312 13:36:38.174234 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:38 crc kubenswrapper[4125]: I0312 13:36:38.174375 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:38 crc kubenswrapper[4125]: I0312 13:36:38.174251 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:38 crc kubenswrapper[4125]: I0312 13:36:38.174643 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:38 crc kubenswrapper[4125]: I0312 13:36:38.901600 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:38 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:38 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:38 crc kubenswrapper[4125]: I0312 13:36:38.901780 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:39 crc kubenswrapper[4125]: E0312 13:36:39.033017 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:36:39 crc kubenswrapper[4125]: E0312 13:36:39.033339 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:36:39 crc kubenswrapper[4125]: I0312 13:36:39.332901 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt"
Mar 12 13:36:39 crc kubenswrapper[4125]: I0312 13:36:39.906945 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:39 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:39 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:39 crc kubenswrapper[4125]: I0312 13:36:39.907272 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:40 crc kubenswrapper[4125]: I0312 13:36:40.891698 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:40 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:40 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:40 crc kubenswrapper[4125]: I0312 13:36:40.892054 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:41 crc kubenswrapper[4125]: E0312 13:36:41.030889 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 12 13:36:41 crc kubenswrapper[4125]: E0312 13:36:41.031911 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:36:41 crc kubenswrapper[4125]: I0312 13:36:41.174507 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:41 crc kubenswrapper[4125]: I0312 13:36:41.174628 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:41 crc kubenswrapper[4125]: I0312 13:36:41.175126 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Mar 12 13:36:41 crc kubenswrapper[4125]: I0312 13:36:41.175430 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Mar 12 13:36:41 crc kubenswrapper[4125]: I0312 13:36:41.885508 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:41 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:41 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:41 crc kubenswrapper[4125]: I0312 13:36:41.885899 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:42 crc kubenswrapper[4125]: E0312 13:36:42.031538 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:36:42 crc kubenswrapper[4125]: E0312 13:36:42.031902 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
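[Annotation] The ImagePullBackOff entries above are the kubelet's retry damping: after repeated pull failures the pod sits in back-off rather than hammering the registry, with the delay growing on each failure. A sketch of the general pattern follows; the 10s initial delay and 5m cap are the commonly cited kubelet defaults, assumed here for illustration, and the function is not the kubelet's implementation.

// backoff_sketch.go - illustrative exponential back-off, not kubelet code.
package main

import (
	"errors"
	"fmt"
	"time"
)

// pullWithBackoff retries pull(), roughly doubling the delay after each
// failure up to maxDelay, the way image pull back-off behaves in practice.
func pullWithBackoff(pull func() error, initial, maxDelay time.Duration, maxTries int) error {
	delay := initial
	for try := 1; try <= maxTries; try++ {
		if err := pull(); err == nil {
			return nil
		}
		fmt.Printf("pull attempt %d failed; backing off %v\n", try, delay)
		time.Sleep(delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	return errors.New("ImagePullBackOff: retries exhausted")
}

func main() {
	alwaysFails := func() error { return errors.New("unauthorized") } // mimics the auth failures above
	_ = pullWithBackoff(alwaysFails, 10*time.Second, 5*time.Minute, 3)
}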
Mar 12 13:36:42 crc kubenswrapper[4125]: I0312 13:36:42.887793 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:42 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:42 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:42 crc kubenswrapper[4125]: I0312 13:36:42.888018 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.213186 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.263090 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-mtx25 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Mar 12 13:36:43 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Mar 12 13:36:43 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.264270 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.278413 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.278483 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.278474 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.278557 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.278606 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.279439 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.279508 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.279859 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="marketplace-operator" containerStatusID={"Type":"cri-o","ID":"03986766a13ed650c14fd79f9e5b20f3fe8b23aa47c6521ddf17adfb8b570506"} pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" containerMessage="Container marketplace-operator failed liveness probe, will be restarted"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.279952 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" containerID="cri-o://03986766a13ed650c14fd79f9e5b20f3fe8b23aa47c6521ddf17adfb8b570506" gracePeriod=30
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.333101 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-mtx25 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Mar 12 13:36:43 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Mar 12 13:36:43 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.333217 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.713510 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.713729 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.714380 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.714485 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.714534 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.715942 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"a9aa62f93d285cdc767ac1378d09514acb8cee0f731113403a10ced1fcc588b2"} pod="openshift-console/downloads-65476884b9-9wcvx" containerMessage="Container download-server failed liveness probe, will be restarted"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.716108 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" containerID="cri-o://a9aa62f93d285cdc767ac1378d09514acb8cee0f731113403a10ced1fcc588b2" gracePeriod=2
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.717358 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.717434 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.847875 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.848014 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.890503 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:43 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:43 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:43 crc kubenswrapper[4125]: I0312 13:36:43.890567 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.018431 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-mtx25 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Mar 12 13:36:44 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Mar 12 13:36:44 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.018527 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.163924 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl"
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.214395 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-service-ca/service-ca-666f99b6f-vlbxv"]
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.214631 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" podUID="378552fd-5e53-4882-87ff-95f3d9198861" containerName="service-ca-controller" containerID="cri-o://38e89c2365d4719953c6077644989f5c6cd2d5cabf3bd51557ef06a00469791c" gracePeriod=30
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.729700 4125 patch_prober.go:28] interesting pod/authentication-operator-7cc7ff75d5-g9qv8 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.730424 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.890693 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:44 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:44 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:44 crc kubenswrapper[4125]: I0312 13:36:44.890777 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:45 crc kubenswrapper[4125]: E0312 13:36:45.045048 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.102326 4125 generic.go:334] "Generic (PLEG): container finished" podID="6268b7fe-8910-4505-b404-6f1df638105c" containerID="a9aa62f93d285cdc767ac1378d09514acb8cee0f731113403a10ced1fcc588b2" exitCode=0
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.102418 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerDied","Data":"a9aa62f93d285cdc767ac1378d09514acb8cee0f731113403a10ced1fcc588b2"}
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.105447 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/0.log"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.105500 4125 generic.go:334] "Generic (PLEG): container finished" podID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerID="03986766a13ed650c14fd79f9e5b20f3fe8b23aa47c6521ddf17adfb8b570506" exitCode=2
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.105522 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerDied","Data":"03986766a13ed650c14fd79f9e5b20f3fe8b23aa47c6521ddf17adfb8b570506"}
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.175733 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.176035 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.177024 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.177111 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.177182 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.178076 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"78c792e326186d5224a3a883603f4948e9553db7e6abf9de8146d154ac958b88"} pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.178211 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" containerID="cri-o://78c792e326186d5224a3a883603f4948e9553db7e6abf9de8146d154ac958b88" gracePeriod=30
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.450922 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": read tcp 10.217.0.2:39688->10.217.0.23:8443: read: connection reset by peer" start-of-body=
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.451020 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": read tcp 10.217.0.2:39688->10.217.0.23:8443: read: connection reset by peer"
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.915754 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:45 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:45 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:45 crc kubenswrapper[4125]: I0312 13:36:45.916322 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.157963 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/0.log"
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.188393 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/1.log"
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.189702 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/0.log"
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.202462 4125 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="78c792e326186d5224a3a883603f4948e9553db7e6abf9de8146d154ac958b88" exitCode=255
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.202620 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"78c792e326186d5224a3a883603f4948e9553db7e6abf9de8146d154ac958b88"}
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.202718 4125 scope.go:117] "RemoveContainer" containerID="2ffb0a3981b1b9b39719f16bcb32d26d367866fdfabc973591418145dc7f0972"
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.253946 4125 generic.go:334] "Generic (PLEG): container finished" podID="378552fd-5e53-4882-87ff-95f3d9198861" containerID="38e89c2365d4719953c6077644989f5c6cd2d5cabf3bd51557ef06a00469791c" exitCode=0
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.254001 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" event={"ID":"378552fd-5e53-4882-87ff-95f3d9198861","Type":"ContainerDied","Data":"38e89c2365d4719953c6077644989f5c6cd2d5cabf3bd51557ef06a00469791c"}
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.806431 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv"
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.849769 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") pod \"378552fd-5e53-4882-87ff-95f3d9198861\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") "
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.850904 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") pod \"378552fd-5e53-4882-87ff-95f3d9198861\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") "
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.851624 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") pod \"378552fd-5e53-4882-87ff-95f3d9198861\" (UID: \"378552fd-5e53-4882-87ff-95f3d9198861\") "
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.852371 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "378552fd-5e53-4882-87ff-95f3d9198861" (UID: "378552fd-5e53-4882-87ff-95f3d9198861"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.856892 4125 reconciler_common.go:300] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/378552fd-5e53-4882-87ff-95f3d9198861-signing-cabundle\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.873010 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf" (OuterVolumeSpecName: "kube-api-access-d7ntf") pod "378552fd-5e53-4882-87ff-95f3d9198861" (UID: "378552fd-5e53-4882-87ff-95f3d9198861"). InnerVolumeSpecName "kube-api-access-d7ntf". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.890774 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.891319 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.958008 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-d7ntf\" (UniqueName: \"kubernetes.io/projected/378552fd-5e53-4882-87ff-95f3d9198861-kube-api-access-d7ntf\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:46 crc kubenswrapper[4125]: I0312 13:36:46.958121 4125 reconciler_common.go:300] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/378552fd-5e53-4882-87ff-95f3d9198861-signing-key\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.174617 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.175098 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.263324 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/0.log" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.263713 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"2e4460ec3c54f3b8ff53d17aae59a9884a7b46f8168d7c7ea0ebcb59478846ce"} Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.265596 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.266004 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.266699 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" 
podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.269242 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/1.log" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.273677 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"cb18a42ae7d08102112f0b2dbe345149020fb6be3d4f7930e9cfc31aa700b414"} Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.276517 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.276518 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-666f99b6f-vlbxv" event={"ID":"378552fd-5e53-4882-87ff-95f3d9198861","Type":"ContainerDied","Data":"937398981e25eb6e4d38bb4f0d8fb8dc6dcc16b27b3d8c22df919194b9f3a260"} Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.276944 4125 scope.go:117] "RemoveContainer" containerID="38e89c2365d4719953c6077644989f5c6cd2d5cabf3bd51557ef06a00469791c" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.279946 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"44ab1d97ab605ba35233246ae3683b740f13717f9a0e595713bb0d587b972519"} Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.281707 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.282020 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.282192 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.545404 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"] Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.545674 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" containerID="cri-o://d943ace82fc3b0b87ffb23bd3be803a83c10d920dfaf0f26664415e34667cff8" gracePeriod=30 Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.664609 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"] Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.665654 4125 
kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerName="route-controller-manager" containerID="cri-o://5e741ea24a748382b3c92c15feb2fb65a5064974af9975b49afd8532fa0ccef2" gracePeriod=30 Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.798796 4125 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.889747 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:47 crc kubenswrapper[4125]: I0312 13:36:47.889891 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.021733 4125 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-03-12T13:36:47.799277705Z","Handler":null,"Name":""} Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.331804 4125 csi_plugin.go:99] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.331920 4125 csi_plugin.go:112] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.376410 4125 generic.go:334] "Generic (PLEG): container finished" podID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerID="5e741ea24a748382b3c92c15feb2fb65a5064974af9975b49afd8532fa0ccef2" exitCode=0 Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.377100 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" event={"ID":"af6b67a3-a2bd-4051-9adc-c208a5a65d79","Type":"ContainerDied","Data":"5e741ea24a748382b3c92c15feb2fb65a5064974af9975b49afd8532fa0ccef2"} Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.387745 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.387881 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 
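
The router's startup probe keeps failing with a 500 and a per-check breakdown. A hand-rolled HTTP probe in Go showing where the logged status and "start-of-body" text come from; the endpoint URL and timeout here are assumptions, not values read from the router manifest:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "time"
    )

    func main() {
        // Short timeout mirrors probe behavior; URL is illustrative only.
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get("http://10.217.0.2:1936/healthz/ready")
        if err != nil {
            // e.g. "connect: connection refused", as in the readiness failures above
            fmt.Println("probe failure:", err)
            return
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
        // A 500 plus the per-check breakdown ([-]backend-http failed: ...)
        // is what patch_prober.go records as start-of-body.
        fmt.Printf("status=%d\n%s\n", resp.StatusCode, body)
    }
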
13:36:48.389003 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.389062 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.407989 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-service-ca/service-ca-666f99b6f-vlbxv"] Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.443614 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-service-ca/service-ca-666f99b6f-vlbxv"] Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.466531 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-cd974775-4nsv5"] Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.466975 4125 topology_manager.go:215] "Topology Admit Handler" podUID="6639609b-906b-4193-883e-ed1160aa5d50" podNamespace="openshift-service-ca" podName="service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: E0312 13:36:48.467297 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="973266fa-3775-4a33-9ee8-9af757721a2a" containerName="collect-profiles" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.467467 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="973266fa-3775-4a33-9ee8-9af757721a2a" containerName="collect-profiles" Mar 12 13:36:48 crc kubenswrapper[4125]: E0312 13:36:48.467639 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="378552fd-5e53-4882-87ff-95f3d9198861" containerName="service-ca-controller" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.469036 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="378552fd-5e53-4882-87ff-95f3d9198861" containerName="service-ca-controller" Mar 12 13:36:48 crc kubenswrapper[4125]: E0312 13:36:48.469133 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" containerName="pruner" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.469170 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" containerName="pruner" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.469473 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="378552fd-5e53-4882-87ff-95f3d9198861" containerName="service-ca-controller" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.469488 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" containerName="pruner" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.469500 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="973266fa-3775-4a33-9ee8-9af757721a2a" containerName="collect-profiles" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.470087 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.488046 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.488046 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.493423 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-79vsd" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.493907 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.523121 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.523651 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.524998 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.531080 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.626128 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.626250 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.626287 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.627664 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.634258 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.674868 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-cd974775-4nsv5"] Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.722076 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-mtx25 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]log ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]etcd ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok Mar 12 13:36:48 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok Mar 12 13:36:48 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Mar 12 13:36:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.722194 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.881971 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.918937 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:48 crc 
kubenswrapper[4125]: healthz check failed Mar 12 13:36:48 crc kubenswrapper[4125]: I0312 13:36:48.919568 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.096587 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.421536 4125 generic.go:334] "Generic (PLEG): container finished" podID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerID="d943ace82fc3b0b87ffb23bd3be803a83c10d920dfaf0f26664415e34667cff8" exitCode=0 Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.421660 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" event={"ID":"87df87f4-ba66-4137-8e41-1fa632ad4207","Type":"ContainerDied","Data":"d943ace82fc3b0b87ffb23bd3be803a83c10d920dfaf0f26664415e34667cff8"} Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.448924 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/1.log" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.454103 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12"} Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.454508 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.454635 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.454675 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.457914 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.458184 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.561112 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.649988 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") pod \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.650070 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") pod \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.650180 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") pod \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.650237 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") pod \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\" (UID: \"af6b67a3-a2bd-4051-9adc-c208a5a65d79\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.652609 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca" (OuterVolumeSpecName: "client-ca") pod "af6b67a3-a2bd-4051-9adc-c208a5a65d79" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.652690 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config" (OuterVolumeSpecName: "config") pod "af6b67a3-a2bd-4051-9adc-c208a5a65d79" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.673602 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "af6b67a3-a2bd-4051-9adc-c208a5a65d79" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.689184 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn" (OuterVolumeSpecName: "kube-api-access-hpzhn") pod "af6b67a3-a2bd-4051-9adc-c208a5a65d79" (UID: "af6b67a3-a2bd-4051-9adc-c208a5a65d79"). InnerVolumeSpecName "kube-api-access-hpzhn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.757701 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.757910 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af6b67a3-a2bd-4051-9adc-c208a5a65d79-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.757936 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hpzhn\" (UniqueName: \"kubernetes.io/projected/af6b67a3-a2bd-4051-9adc-c208a5a65d79-kube-api-access-hpzhn\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.757948 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6b67a3-a2bd-4051-9adc-c208a5a65d79-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.760442 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.859405 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") pod \"87df87f4-ba66-4137-8e41-1fa632ad4207\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.859474 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") pod \"87df87f4-ba66-4137-8e41-1fa632ad4207\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.859621 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") pod \"87df87f4-ba66-4137-8e41-1fa632ad4207\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.859656 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") pod \"87df87f4-ba66-4137-8e41-1fa632ad4207\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.860492 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca" (OuterVolumeSpecName: "client-ca") pod "87df87f4-ba66-4137-8e41-1fa632ad4207" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.860552 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "87df87f4-ba66-4137-8e41-1fa632ad4207" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.860575 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config" (OuterVolumeSpecName: "config") pod "87df87f4-ba66-4137-8e41-1fa632ad4207" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.860737 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") pod \"87df87f4-ba66-4137-8e41-1fa632ad4207\" (UID: \"87df87f4-ba66-4137-8e41-1fa632ad4207\") " Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.861192 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.861211 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.861221 4125 reconciler_common.go:300] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/87df87f4-ba66-4137-8e41-1fa632ad4207-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.878041 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "87df87f4-ba66-4137-8e41-1fa632ad4207" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.879638 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57" (OuterVolumeSpecName: "kube-api-access-pzb57") pod "87df87f4-ba66-4137-8e41-1fa632ad4207" (UID: "87df87f4-ba66-4137-8e41-1fa632ad4207"). InnerVolumeSpecName "kube-api-access-pzb57". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.892014 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.892111 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.962686 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87df87f4-ba66-4137-8e41-1fa632ad4207-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:49 crc kubenswrapper[4125]: I0312 13:36:49.962750 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-pzb57\" (UniqueName: \"kubernetes.io/projected/87df87f4-ba66-4137-8e41-1fa632ad4207-kube-api-access-pzb57\") on node \"crc\" DevicePath \"\"" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.037634 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="378552fd-5e53-4882-87ff-95f3d9198861" path="/var/lib/kubelet/pods/378552fd-5e53-4882-87ff-95f3d9198861/volumes" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.465576 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" event={"ID":"af6b67a3-a2bd-4051-9adc-c208a5a65d79","Type":"ContainerDied","Data":"c13bd598fec680c7f0451a8d6f2749af905d284092b8f7ba74d7531994f015c4"} Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.465983 4125 scope.go:117] "RemoveContainer" containerID="5e741ea24a748382b3c92c15feb2fb65a5064974af9975b49afd8532fa0ccef2" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.466107 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.479394 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.479897 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ff78978b4-q4vv8" event={"ID":"87df87f4-ba66-4137-8e41-1fa632ad4207","Type":"ContainerDied","Data":"cb5d563ba52a01dfa56f9b27f2e068ff2ef322ed159c035ea3a168b811ea8ea4"} Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.751182 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"] Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.775286 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6ff78978b4-q4vv8"] Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.839494 4125 scope.go:117] "RemoveContainer" containerID="d943ace82fc3b0b87ffb23bd3be803a83c10d920dfaf0f26664415e34667cff8" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.888957 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:50 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:50 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.889120 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:50 crc kubenswrapper[4125]: I0312 13:36:50.974329 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"] Mar 12 13:36:51 crc kubenswrapper[4125]: I0312 13:36:51.020795 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c4dbb8899-tchz5"] Mar 12 13:36:51 crc kubenswrapper[4125]: I0312 13:36:51.699019 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:36:51 crc kubenswrapper[4125]: I0312 13:36:51.888451 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:36:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:36:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:36:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:36:51 crc kubenswrapper[4125]: I0312 13:36:51.888631 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.008697 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"] Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.008865 4125 topology_manager.go:215] 
"Topology Admit Handler" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" podNamespace="openshift-controller-manager" podName="controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: E0312 13:36:52.009688 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerName="route-controller-manager" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.009708 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerName="route-controller-manager" Mar 12 13:36:52 crc kubenswrapper[4125]: E0312 13:36:52.009721 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.009728 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.009932 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" containerName="controller-manager" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.009947 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" containerName="route-controller-manager" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.010425 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.022056 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.023363 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.023560 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.023708 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-58g82" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.023763 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.023913 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 12 13:36:52 crc kubenswrapper[4125]: E0312 13:36:52.028766 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.047291 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87df87f4-ba66-4137-8e41-1fa632ad4207" path="/var/lib/kubelet/pods/87df87f4-ba66-4137-8e41-1fa632ad4207/volumes" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.048271 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af6b67a3-a2bd-4051-9adc-c208a5a65d79" 
path="/var/lib/kubelet/pods/af6b67a3-a2bd-4051-9adc-c208a5a65d79/volumes" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.052263 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.092508 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2csb5\" (UniqueName: \"kubernetes.io/projected/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-kube-api-access-2csb5\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.092628 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-proxy-ca-bundles\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.092659 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-client-ca\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.092695 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-config\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.092790 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-serving-cert\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.126044 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"] Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.194991 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-serving-cert\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.195240 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2csb5\" (UniqueName: \"kubernetes.io/projected/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-kube-api-access-2csb5\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.195284 
4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-proxy-ca-bundles\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.195313 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-client-ca\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.195341 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-config\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.198515 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-config\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.198633 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-client-ca\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.207910 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-serving-cert\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.211081 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"] Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.211284 4125 topology_manager.go:215] "Topology Admit Handler" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-697fdcccc7-vzksx" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.212045 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.226047 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.226632 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.234351 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-proxy-ca-bundles\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.235331 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.235682 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.235961 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.253006 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.299098 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-serving-cert\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.299202 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-client-ca\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.299240 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdfkb\" (UniqueName: \"kubernetes.io/projected/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-kube-api-access-zdfkb\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.299318 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-config\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.297717 4125 
kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"]
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.318485 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-cd974775-4nsv5"]
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.346361 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2csb5\" (UniqueName: \"kubernetes.io/projected/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-kube-api-access-2csb5\") pod \"controller-manager-5cf7764b85-2d6rj\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.412611 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-serving-cert\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.412731 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-client-ca\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.412761 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zdfkb\" (UniqueName: \"kubernetes.io/projected/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-kube-api-access-zdfkb\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.420492 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-config\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.491682 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-config\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.493075 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-serving-cert\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.501264 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-client-ca\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.512641 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-cd974775-4nsv5" event={"ID":"6639609b-906b-4193-883e-ed1160aa5d50","Type":"ContainerStarted","Data":"b15ee544d42f5487b0908d1c32ffcbd05799f52ccc5663461a718d62ffc2ba78"}
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.635279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.785190 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdfkb\" (UniqueName: \"kubernetes.io/projected/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-kube-api-access-zdfkb\") pod \"route-controller-manager-697fdcccc7-vzksx\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.875632 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.904997 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:52 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:52 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:52 crc kubenswrapper[4125]: I0312 13:36:52.905640 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.033201 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 12 13:36:53 crc kubenswrapper[4125]: E0312 13:36:53.258066 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:36:53 crc kubenswrapper[4125]: E0312 13:36:53.258209 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/certified-operator-index:v4.16"
Mar 12 13:36:53 crc kubenswrapper[4125]: E0312 13:36:53.258328 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-ncrf5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-7287f_openshift-marketplace(887d596e-c519-4bfa-af90-3edd9e1b2f0f): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication
Mar 12 13:36:53 crc kubenswrapper[4125]: E0312 13:36:53.258382 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.581415 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.714666 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.714749 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.715075 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.715101 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.718194 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.731355 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.851367 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.851482 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.896772 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:53 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:53 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:53 crc kubenswrapper[4125]: I0312 13:36:53.896906 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
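The probes above fail in two distinct ways: router-default returns an HTTP 500 from a reachable endpoint, while downloads and console refuse the TCP connection entirely because the server is not listening yet. A rough sketch of the check kubelet's prober performs, under the usual rule that status codes in [200,400) count as success; the URL and timeout are placeholders:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeHTTP mirrors the two failure modes in the log: a transport error
// ("dial tcp ...: connect: connection refused") and a bad status code
// ("HTTP probe failed with statuscode: 500").
func probeHTTP(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. connection refused while the server is still starting
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
}

func main() {
	// Placeholder endpoint standing in for http://10.217.0.66:8080/
	if err := probeHTTP("http://127.0.0.1:8080/"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}

A startup probe failing this way is not fatal by itself; kubelet simply retries on the configured period, which is why the router entries recur once per second below.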
podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.145994 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.146081 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.146242 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-ptdrb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-f4jkp_openshift-marketplace(4092a9f8-5acc-4932-9e90-ef962eeb301a): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.146301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.148336 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.148363 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-operator-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.148435 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-nzb4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dcqzh_openshift-marketplace(6db26b71-4e04-4688-a0c0-00e06e8c888d): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.148582 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.149114 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.149134 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.149221 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n6sqt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-8jhz6_openshift-marketplace(3f4dca86-e6ee-4ec9-8324-86aff960225e): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.152905 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.190703 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.191013 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16" Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.191217 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-tf29r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-8s8pc_openshift-marketplace(c782cf62-a827-4677-b3c2-6f82c5f09cbb): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:54 crc kubenswrapper[4125]: E0312 13:36:54.191283 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
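Every marketplace catalog pod fails identically: CRI-O cannot obtain a bearer token from registry.redhat.io's token service, so the pull dies before any image blob is fetched. A sketch of the Docker Registry v2 token exchange that is failing here; the realm and service values are what a registry's 401 challenge typically advertises, not values taken from this log:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// fetchToken performs the registry v2 bearer-token exchange. An unauthenticated
// GET to /v2/ returns 401 with a WWW-Authenticate challenge naming a realm and
// service; the client then presents Basic credentials to that realm. A 401 at
// this step is what surfaces above as "unable to retrieve auth token".
func fetchToken(realm, service, scope, user, pass string) (string, error) {
	q := url.Values{"service": {service}, "scope": {scope}}
	req, err := http.NewRequest("GET", realm+"?"+q.Encode(), nil)
	if err != nil {
		return "", err
	}
	req.SetBasicAuth(user, pass) // the Customer Portal credentials the message asks for
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("invalid username/password: unauthorized (status %d)", resp.StatusCode)
	}
	var body struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return "", err
	}
	return body.Token, nil
}

func main() {
	// Illustrative realm/service standing in for registry.redhat.io's challenge.
	tok, err := fetchToken("https://sso.redhat.com/token", "registry.redhat.io",
		"repository:redhat/redhat-operator-index:pull", "user", "pass")
	if err != nil {
		fmt.Println("PullImage would fail:", err)
		return
	}
	fmt.Println("token length:", len(tok))
}

Because ImagePullPolicy is Always on these init containers, kubelet retries the pull on every sync, which is why the same four-entry error cycle repeats for each catalog pod below.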
Mar 12 13:36:54 crc kubenswrapper[4125]: I0312 13:36:54.530510 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-cd974775-4nsv5" event={"ID":"6639609b-906b-4193-883e-ed1160aa5d50","Type":"ContainerStarted","Data":"0652114292c3cf2f2f0c5d92d15f5ec18bb568f43216dae95cc8afca34d3d4a4"}
Mar 12 13:36:54 crc kubenswrapper[4125]: I0312 13:36:54.900640 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:54 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:54 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:54 crc kubenswrapper[4125]: I0312 13:36:54.901107 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.716195 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podStartSLOduration=8.716074784 podStartE2EDuration="8.716074784s" podCreationTimestamp="2026-03-12 13:36:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:36:54.581655652 +0000 UTC m=+984.905041751" watchObservedRunningTime="2026-03-12 13:36:55.716074784 +0000 UTC m=+986.039460693"
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.718076 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-585546dd8b-v5m4t"]
Mar 12 13:36:55 crc kubenswrapper[4125]: E0312 13:36:55.718925 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="unmounted volumes=[registry-storage], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306"
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.887133 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:55 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:55 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.893604 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.957199 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-86594ff457-6b77x"]
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.957377 4125 topology_manager.go:215] "Topology Admit Handler" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" podNamespace="openshift-image-registry" podName="image-registry-86594ff457-6b77x"
Mar 12 13:36:55 crc kubenswrapper[4125]: I0312 13:36:55.958098 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.020989 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-86594ff457-6b77x"]
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.038801 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2zvk\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-kube-api-access-r2zvk\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.038998 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-trusted-ca\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.039032 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-tls\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.039109 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8d14510a-ac3d-4029-ae28-538bb2e94e32-ca-trust-extracted\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.039133 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-bound-sa-token\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.039202 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8d14510a-ac3d-4029-ae28-538bb2e94e32-installation-pull-secrets\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.039245 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-certificates\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: E0312 13:36:56.137741 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16" Mar 12 13:36:56 crc kubenswrapper[4125]: E0312 13:36:56.139517 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/community-operator-index:v4.16" Mar 12 13:36:56 crc kubenswrapper[4125]: E0312 13:36:56.140209 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-n59fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-k9qqb_openshift-marketplace(ccdf38cf-634a-41a2-9c8b-74bb86af80a7): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:36:56 crc kubenswrapper[4125]: E0312 13:36:56.140304 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.140671 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8d14510a-ac3d-4029-ae28-538bb2e94e32-ca-trust-extracted\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.140803 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-bound-sa-token\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.140997 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8d14510a-ac3d-4029-ae28-538bb2e94e32-installation-pull-secrets\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.141196 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-certificates\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.141348 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r2zvk\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-kube-api-access-r2zvk\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.141273 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8d14510a-ac3d-4029-ae28-538bb2e94e32-ca-trust-extracted\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.141649 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-trusted-ca\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.141764 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-tls\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:36:56 crc 
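The burst of reconciler_common messages shows kubelet's volume-manager pattern: a desired-state world populated from the pod spec, an actual-state world, and a loop that issues MountVolume/UnmountVolume operations to converge them. A toy sketch of that reconcile shape; the volume names are from the log, but the types and loop body are schematic, not kubelet's actual implementation:

package main

import "fmt"

func main() {
	// Desired state: the volumes declared by image-registry-86594ff457-6b77x.
	desired := map[string]bool{
		"ca-trust-extracted": true, "bound-sa-token": true,
		"installation-pull-secrets": true, "registry-certificates": true,
		"kube-api-access-r2zvk": true, "trusted-ca": true, "registry-tls": true,
	}
	actual := map[string]bool{} // nothing mounted yet for a freshly admitted pod

	// Mount anything desired but not yet mounted.
	for vol := range desired {
		if !actual[vol] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
			actual[vol] = true // stands in for the async SetUp succeeding
		}
	}
	// Unmount anything mounted but no longer desired (the deleted pod's case).
	for vol := range actual {
		if !desired[vol] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
			delete(actual, vol)
		}
	}
}

The same loop run against an empty desired set is what produces the UnmountVolume/TearDown sequence for the deleted image-registry-585546dd8b-v5m4t pod further down.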
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.142759 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-certificates\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.143105 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-trusted-ca\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.162498 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8d14510a-ac3d-4029-ae28-538bb2e94e32-installation-pull-secrets\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.168339 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-tls\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.170655 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-bound-sa-token\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.245134 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2zvk\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-kube-api-access-r2zvk\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.408607 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"]
Mar 12 13:36:56 crc kubenswrapper[4125]: W0312 13:36:56.440564 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc246f84b_a4cc_41ce_b8ce_76615b3fbf3b.slice/crio-cbe3b913a7cd4f4787ddc08fe46fceff9c6d385960b1daa07a680caa8b0c7434 WatchSource:0}: Error finding container cbe3b913a7cd4f4787ddc08fe46fceff9c6d385960b1daa07a680caa8b0c7434: Status 404 returned error can't find the container with id cbe3b913a7cd4f4787ddc08fe46fceff9c6d385960b1daa07a680caa8b0c7434
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.568002 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.568299 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" event={"ID":"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b","Type":"ContainerStarted","Data":"cbe3b913a7cd4f4787ddc08fe46fceff9c6d385960b1daa07a680caa8b0c7434"}
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.589944 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652401 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652483 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-ca-trust-extracted\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652581 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khtlk\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-kube-api-access-khtlk\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652766 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652805 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652957 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-certificates\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.652983 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.653022 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-bound-sa-token\") pod \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\" (UID: \"c5bb4cdd-21b9-49ed-84ae-a405b60a0306\") "
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.653922 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.653930 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.656494 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.693912 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.695050 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-kube-api-access-khtlk" (OuterVolumeSpecName: "kube-api-access-khtlk") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "kube-api-access-khtlk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.695417 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.708548 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.725177 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (OuterVolumeSpecName: "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "c5bb4cdd-21b9-49ed-84ae-a405b60a0306" (UID: "c5bb4cdd-21b9-49ed-84ae-a405b60a0306"). InnerVolumeSpecName "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97". PluginName "kubernetes.io/csi", VolumeGidValue ""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.754507 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.754963 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-trusted-ca\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.755092 4125 reconciler_common.go:300] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.755443 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-khtlk\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-kube-api-access-khtlk\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.755545 4125 reconciler_common.go:300] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.755685 4125 reconciler_common.go:300] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-certificates\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.755889 4125 reconciler_common.go:300] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-registry-tls\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.756088 4125 reconciler_common.go:300] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c5bb4cdd-21b9-49ed-84ae-a405b60a0306-bound-sa-token\") on node \"crc\" DevicePath \"\""
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.759993 4125 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
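The csi_attacher line is kubelet noting that the hostpath provisioner does not advertise the STAGE_UNSTAGE_VOLUME node capability, so the staging step (NodeStageVolume to the shared globalmount path) is skipped and the volume goes straight to the per-pod publish mount. A schematic of that branch; the types are invented for illustration, not the CSI Go bindings:

package main

import "fmt"

// nodePlugin stands in for a CSI node service; real drivers report their
// capabilities over gRPC (NodeGetCapabilities), not a struct field.
type nodePlugin struct {
	name            string
	canStageUnstage bool
}

func mountDevice(p nodePlugin, globalMountPath string) {
	if !p.canStageUnstage {
		// Matches the log: "STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."
		fmt.Printf("%s: STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...\n", p.name)
		return
	}
	fmt.Printf("%s: NodeStageVolume -> %s\n", p.name, globalMountPath)
}

func main() {
	hostpath := nodePlugin{name: "kubevirt.io.hostpath-provisioner", canStageUnstage: false}
	mountDevice(hostpath, "/var/lib/kubelet/plugins/.../globalmount")
}

Despite the skip, the next entry still reports "MountVolume.MountDevice succeeded", because for such plugins the device-mount step is a no-op that only records the global mount path.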
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.760142 4125 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ea5f9a7192af1960ec8c50a86fd2d9a756dbf85695798868f611e04a03ec009/globalmount\"" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.839191 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"]
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.886966 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:56 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:56 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.887331 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.925684 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-7cbd5666ff-bbfrf\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf"
Mar 12 13:36:56 crc kubenswrapper[4125]: I0312 13:36:56.959315 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.043004 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-q786x"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.047341 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.372376 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-86594ff457-6b77x\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") " pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.431074 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.431212 4125 topology_manager.go:215] "Topology Admit Handler" podUID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.432240 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.441455 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-dl9g2"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.446576 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.471090 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.471610 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.482728 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.490320 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.572904 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.573021 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.573196 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.586643 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-585546dd8b-v5m4t"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.590054 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" event={"ID":"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d","Type":"ContainerStarted","Data":"a2dacaf5c58e960085a87716a4eb5a0d59a14e61143f52cdc0c6b3da77f3619f"}
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.677089 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.758327 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.809377 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-585546dd8b-v5m4t"]
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.814267 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-585546dd8b-v5m4t"]
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.894757 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:57 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:57 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:57 crc kubenswrapper[4125]: I0312 13:36:57.894892 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.064613 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5bb4cdd-21b9-49ed-84ae-a405b60a0306" path="/var/lib/kubelet/pods/c5bb4cdd-21b9-49ed-84ae-a405b60a0306/volumes"
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.634926 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" event={"ID":"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b","Type":"ContainerStarted","Data":"2c8b59de86ff1d006a61977dde5615d40c7fbf6d0494d909203b682f10ec5fed"}
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.638796 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.638910 4125 patch_prober.go:28] interesting pod/route-controller-manager-697fdcccc7-vzksx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body=
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.639172 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused"
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.887328 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:58 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:58 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:58 crc kubenswrapper[4125]: I0312 13:36:58.887570 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:36:59 crc kubenswrapper[4125]: I0312 13:36:59.665943 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" event={"ID":"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d","Type":"ContainerStarted","Data":"b7e1eea8a37bc6632a6efcae94187228fab0f42f894694350dfef6a577170ae5"}
Mar 12 13:36:59 crc kubenswrapper[4125]: I0312 13:36:59.667395 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"
Mar 12 13:36:59 crc kubenswrapper[4125]: I0312 13:36:59.670210 4125 patch_prober.go:28] interesting pod/controller-manager-5cf7764b85-2d6rj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Mar 12 13:36:59 crc kubenswrapper[4125]: I0312 13:36:59.670285 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Mar 12 13:36:59 crc kubenswrapper[4125]: I0312 13:36:59.895868 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:36:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:36:59 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:36:59 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:36:59 crc kubenswrapper[4125]: I0312 13:36:59.897208 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.028264 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" podStartSLOduration=8.028213475 podStartE2EDuration="8.028213475s" podCreationTimestamp="2026-03-12 13:36:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:36:58.867086212 +0000 UTC m=+989.190472205" watchObservedRunningTime="2026-03-12 13:37:00.028213475 +0000 UTC m=+990.351599484"
Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.042112 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-7cbd5666ff-bbfrf"]
Mar 12 13:37:00 crc kubenswrapper[4125]: E0312 13:37:00.197936 4125 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials.
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16" Mar 12 13:37:00 crc kubenswrapper[4125]: E0312 13:37:00.198542 4125 kuberuntime_image.go:55] "Failed to pull image" err="unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.16" Mar 12 13:37:00 crc kubenswrapper[4125]: E0312 13:37:00.199024 4125 kuberuntime_manager.go:1262] init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.16,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-r7dbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000210000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rmwfn_openshift-marketplace(9ad279b4-d9dc-42a8-a1c8-a002bd063482): ErrImagePull: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication Mar 12 13:37:00 crc kubenswrapper[4125]: E0312 13:37:00.199326 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.335535 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" podStartSLOduration=9.33548345 podStartE2EDuration="9.33548345s" podCreationTimestamp="2026-03-12 13:36:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:00.229185469 +0000 UTC m=+990.552571638" watchObservedRunningTime="2026-03-12 13:37:00.33548345 +0000 UTC m=+990.658869499" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.339619 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/installer-9-crc"] Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.339785 4125 topology_manager.go:215] "Topology Admit Handler" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" podNamespace="openshift-kube-controller-manager" podName="installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.340705 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: W0312 13:37:00.359853 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42b6a393_6194_4620_bf8f_7e4b6cbe5679.slice/crio-f1600462c36204cb188fbb0af281410ce327126b920a08415a23e48fa5d77c6b WatchSource:0}: Error finding container f1600462c36204cb188fbb0af281410ce327126b920a08415a23e48fa5d77c6b: Status 404 returned error can't find the container with id f1600462c36204cb188fbb0af281410ce327126b920a08415a23e48fa5d77c6b Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.377088 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-kubelet-dir\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.377188 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/972b3cfd-f9d0-485e-924b-b5258282d155-kube-api-access\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.377374 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-var-lock\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.480320 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-var-lock\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 
13:37:00.480462 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-kubelet-dir\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.480513 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/972b3cfd-f9d0-485e-924b-b5258282d155-kube-api-access\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.481120 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-var-lock\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.481328 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-kubelet-dir\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.625327 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/installer-9-crc"] Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.664485 4125 patch_prober.go:28] interesting pod/route-controller-manager-697fdcccc7-vzksx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.664668 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.42:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.690399 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" event={"ID":"42b6a393-6194-4620-bf8f-7e4b6cbe5679","Type":"ContainerStarted","Data":"f1600462c36204cb188fbb0af281410ce327126b920a08415a23e48fa5d77c6b"} Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.697740 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.892363 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:00 crc 
kubenswrapper[4125]: I0312 13:37:00.892912 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:00 crc kubenswrapper[4125]: I0312 13:37:00.894365 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/972b3cfd-f9d0-485e-924b-b5258282d155-kube-api-access\") pod \"installer-9-crc\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") " pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:01 crc kubenswrapper[4125]: I0312 13:37:01.022594 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:37:01 crc kubenswrapper[4125]: I0312 13:37:01.647657 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:37:01 crc kubenswrapper[4125]: I0312 13:37:01.889710 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:01 crc kubenswrapper[4125]: I0312 13:37:01.890351 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:02 crc kubenswrapper[4125]: I0312 13:37:02.579777 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Mar 12 13:37:02 crc kubenswrapper[4125]: W0312 13:37:02.623385 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poddc41379b_41a8_497f_8ac6_4ee19454d1d2.slice/crio-fbb9a245ea03a8e276362c8a97c4748a7a3222ce43bb1033d15d13e27494e91b WatchSource:0}: Error finding container fbb9a245ea03a8e276362c8a97c4748a7a3222ce43bb1033d15d13e27494e91b: Status 404 returned error can't find the container with id fbb9a245ea03a8e276362c8a97c4748a7a3222ce43bb1033d15d13e27494e91b Mar 12 13:37:02 crc kubenswrapper[4125]: I0312 13:37:02.737452 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"dc41379b-41a8-497f-8ac6-4ee19454d1d2","Type":"ContainerStarted","Data":"fbb9a245ea03a8e276362c8a97c4748a7a3222ce43bb1033d15d13e27494e91b"} Mar 12 13:37:02 crc kubenswrapper[4125]: I0312 13:37:02.889380 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:02 crc kubenswrapper[4125]: I0312 13:37:02.889643 4125 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.086322 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-86594ff457-6b77x"] Mar 12 13:37:03 crc kubenswrapper[4125]: W0312 13:37:03.116054 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d14510a_ac3d_4029_ae28_538bb2e94e32.slice/crio-dec419af66e5c6d1f29c5005c971d4a714601c652d36d047e42e480153aaa9fa WatchSource:0}: Error finding container dec419af66e5c6d1f29c5005c971d4a714601c652d36d047e42e480153aaa9fa: Status 404 returned error can't find the container with id dec419af66e5c6d1f29c5005c971d4a714601c652d36d047e42e480153aaa9fa Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.713570 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.715077 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.714530 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.715439 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.777203 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" event={"ID":"42b6a393-6194-4620-bf8f-7e4b6cbe5679","Type":"ContainerStarted","Data":"1e2fec87605c0f6f17be5fe9f57263de9e9c959c83ac27c370eb1927d6b59791"} Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.781508 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-86594ff457-6b77x" event={"ID":"8d14510a-ac3d-4029-ae28-538bb2e94e32","Type":"ContainerStarted","Data":"dec419af66e5c6d1f29c5005c971d4a714601c652d36d047e42e480153aaa9fa"} Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.852021 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.852216 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" 
containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.897121 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:03 crc kubenswrapper[4125]: I0312 13:37:03.897253 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:04 crc kubenswrapper[4125]: E0312 13:37:04.029609 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" Mar 12 13:37:04 crc kubenswrapper[4125]: I0312 13:37:04.146751 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" podStartSLOduration=53827369.14670058 podStartE2EDuration="14952h2m49.146700581s" podCreationTimestamp="2024-06-27 13:34:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:03.944413339 +0000 UTC m=+994.267799448" watchObservedRunningTime="2026-03-12 13:37:04.146700581 +0000 UTC m=+994.470086540" Mar 12 13:37:04 crc kubenswrapper[4125]: I0312 13:37:04.794386 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:37:04 crc kubenswrapper[4125]: I0312 13:37:04.905889 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:04 crc kubenswrapper[4125]: I0312 13:37:04.906017 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:05 crc kubenswrapper[4125]: E0312 13:37:05.029565 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:37:05 crc kubenswrapper[4125]: I0312 13:37:05.877235 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-controller-manager/installer-9-crc"] Mar 12 13:37:05 crc kubenswrapper[4125]: I0312 13:37:05.888359 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:05 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:05 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:05 crc kubenswrapper[4125]: I0312 13:37:05.888647 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:06 crc kubenswrapper[4125]: E0312 13:37:06.035055 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" Mar 12 13:37:06 crc kubenswrapper[4125]: E0312 13:37:06.035334 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:37:06 crc kubenswrapper[4125]: I0312 13:37:06.815252 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"dc41379b-41a8-497f-8ac6-4ee19454d1d2","Type":"ContainerStarted","Data":"ba6155eb80cde91f25c3116c9069a03b975e1154294dfad654bf69cc6acec3e4"} Mar 12 13:37:06 crc kubenswrapper[4125]: I0312 13:37:06.820276 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-86594ff457-6b77x" event={"ID":"8d14510a-ac3d-4029-ae28-538bb2e94e32","Type":"ContainerStarted","Data":"573365d61ad5859a16302f5bf7be8220308463d7183d6af3dd120eb68123e736"} Mar 12 13:37:06 crc kubenswrapper[4125]: I0312 13:37:06.820481 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:37:06 crc kubenswrapper[4125]: I0312 13:37:06.827022 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-9-crc" event={"ID":"972b3cfd-f9d0-485e-924b-b5258282d155","Type":"ContainerStarted","Data":"9c4637669f76d77a2a1dda152cd35451cc82348e79d6d18f9076cbcd53ef56df"} Mar 12 13:37:06 crc kubenswrapper[4125]: I0312 13:37:06.890379 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:06 crc kubenswrapper[4125]: I0312 13:37:06.891958 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" 
podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:07 crc kubenswrapper[4125]: I0312 13:37:07.003484 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-image-registry/image-registry-86594ff457-6b77x" podStartSLOduration=12.003426314 podStartE2EDuration="12.003426314s" podCreationTimestamp="2026-03-12 13:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:07.001519325 +0000 UTC m=+997.324905274" watchObservedRunningTime="2026-03-12 13:37:07.003426314 +0000 UTC m=+997.326812353" Mar 12 13:37:07 crc kubenswrapper[4125]: I0312 13:37:07.005438 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=10.005413126 podStartE2EDuration="10.005413126s" podCreationTimestamp="2026-03-12 13:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:06.913109546 +0000 UTC m=+997.236495651" watchObservedRunningTime="2026-03-12 13:37:07.005413126 +0000 UTC m=+997.328799015" Mar 12 13:37:07 crc kubenswrapper[4125]: E0312 13:37:07.027614 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 12 13:37:07 crc kubenswrapper[4125]: I0312 13:37:07.052438 4125 patch_prober.go:28] interesting pod/image-registry-7cbd5666ff-bbfrf container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.38:5000/healthz\": dial tcp 10.217.0.38:5000: connect: connection refused" start-of-body= Mar 12 13:37:07 crc kubenswrapper[4125]: I0312 13:37:07.052544 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.38:5000/healthz\": dial tcp 10.217.0.38:5000: connect: connection refused" Mar 12 13:37:07 crc kubenswrapper[4125]: I0312 13:37:07.887724 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:07 crc kubenswrapper[4125]: I0312 13:37:07.888238 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:08 crc kubenswrapper[4125]: I0312 13:37:08.846925 4125 generic.go:334] "Generic (PLEG): container finished" podID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" containerID="ba6155eb80cde91f25c3116c9069a03b975e1154294dfad654bf69cc6acec3e4" exitCode=0 Mar 12 13:37:08 crc 
kubenswrapper[4125]: I0312 13:37:08.847116 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"dc41379b-41a8-497f-8ac6-4ee19454d1d2","Type":"ContainerDied","Data":"ba6155eb80cde91f25c3116c9069a03b975e1154294dfad654bf69cc6acec3e4"} Mar 12 13:37:08 crc kubenswrapper[4125]: I0312 13:37:08.887984 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:08 crc kubenswrapper[4125]: I0312 13:37:08.888067 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:09 crc kubenswrapper[4125]: E0312 13:37:09.028546 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" Mar 12 13:37:09 crc kubenswrapper[4125]: E0312 13:37:09.029006 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 12 13:37:09 crc kubenswrapper[4125]: I0312 13:37:09.890494 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:09 crc kubenswrapper[4125]: I0312 13:37:09.890594 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:10 crc kubenswrapper[4125]: I0312 13:37:10.890789 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:10 crc kubenswrapper[4125]: I0312 13:37:10.891675 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe 
failed with statuscode: 500" Mar 12 13:37:11 crc kubenswrapper[4125]: I0312 13:37:11.890877 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:11 crc kubenswrapper[4125]: I0312 13:37:11.891416 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:12 crc kubenswrapper[4125]: E0312 13:37:12.040108 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:37:12 crc kubenswrapper[4125]: I0312 13:37:12.879542 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"dc41379b-41a8-497f-8ac6-4ee19454d1d2","Type":"ContainerDied","Data":"fbb9a245ea03a8e276362c8a97c4748a7a3222ce43bb1033d15d13e27494e91b"} Mar 12 13:37:12 crc kubenswrapper[4125]: I0312 13:37:12.880116 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fbb9a245ea03a8e276362c8a97c4748a7a3222ce43bb1033d15d13e27494e91b" Mar 12 13:37:12 crc kubenswrapper[4125]: I0312 13:37:12.888496 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:12 crc kubenswrapper[4125]: I0312 13:37:12.888644 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:12 crc kubenswrapper[4125]: I0312 13:37:12.899173 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.025853 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kubelet-dir\") pod \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.025985 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kube-api-access\") pod \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\" (UID: \"dc41379b-41a8-497f-8ac6-4ee19454d1d2\") " Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.026045 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "dc41379b-41a8-497f-8ac6-4ee19454d1d2" (UID: "dc41379b-41a8-497f-8ac6-4ee19454d1d2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.026759 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.051079 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "dc41379b-41a8-497f-8ac6-4ee19454d1d2" (UID: "dc41379b-41a8-497f-8ac6-4ee19454d1d2"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.128246 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc41379b-41a8-497f-8ac6-4ee19454d1d2-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.714316 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.714408 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.714455 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.716451 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"44ab1d97ab605ba35233246ae3683b740f13717f9a0e595713bb0d587b972519"} pod="openshift-console/downloads-65476884b9-9wcvx" containerMessage="Container download-server failed liveness probe, will be restarted" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.716549 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" containerID="cri-o://44ab1d97ab605ba35233246ae3683b740f13717f9a0e595713bb0d587b972519" gracePeriod=2 Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.717204 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.717381 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.718001 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.718118 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.848310 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console 
namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.848466 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.898689 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.898857 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-9-crc" event={"ID":"972b3cfd-f9d0-485e-924b-b5258282d155","Type":"ContainerStarted","Data":"668ae8f960878d1d1497a68b5fc115baff849ced6ae4a2760ec901bdbacfc479"} Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.909422 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:13 crc kubenswrapper[4125]: I0312 13:37:13.909518 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.538470 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.538909 4125 topology_manager.go:215] "Topology Admit Handler" podUID="35c093da-a468-44a1-8ff0-09b09268828c" podNamespace="openshift-kube-apiserver" podName="installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: E0312 13:37:14.539081 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" containerName="pruner" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.539117 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" containerName="pruner" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.539298 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" containerName="pruner" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.539772 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.563418 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/installer-9-crc" podStartSLOduration=14.563369625 podStartE2EDuration="14.563369625s" podCreationTimestamp="2026-03-12 13:37:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:14.556625821 +0000 UTC m=+1004.880011940" watchObservedRunningTime="2026-03-12 13:37:14.563369625 +0000 UTC m=+1004.886755494" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.613221 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-var-lock\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.613350 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35c093da-a468-44a1-8ff0-09b09268828c-kube-api-access\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.613385 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.630390 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-4kgh8" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.630630 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.670053 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.715104 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35c093da-a468-44a1-8ff0-09b09268828c-kube-api-access\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.715437 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.715552 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-var-lock\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.715633 4125 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-var-lock\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.715718 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.888993 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.889102 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.912328 4125 generic.go:334] "Generic (PLEG): container finished" podID="6268b7fe-8910-4505-b404-6f1df638105c" containerID="44ab1d97ab605ba35233246ae3683b740f13717f9a0e595713bb0d587b972519" exitCode=0 Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.913318 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerDied","Data":"44ab1d97ab605ba35233246ae3683b740f13717f9a0e595713bb0d587b972519"} Mar 12 13:37:14 crc kubenswrapper[4125]: I0312 13:37:14.913377 4125 scope.go:117] "RemoveContainer" containerID="a9aa62f93d285cdc767ac1378d09514acb8cee0f731113403a10ced1fcc588b2" Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.110412 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35c093da-a468-44a1-8ff0-09b09268828c-kube-api-access\") pod \"installer-9-crc\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") " pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.302244 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.626119 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-apiserver/apiserver-67cbf64bc9-mtx25"] Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.626657 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" containerID="cri-o://e626fe31e9fc0d6f2e34cae2f75ebd1df96daffc967397efe4465fb73926e0dd" gracePeriod=90 Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.627068 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver-check-endpoints" containerID="cri-o://9abe5af36bb650e822fc8617d763cb1ac72e09098227d9187b59e949bcd51a26" gracePeriod=90 Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.893040 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-apiserver/apiserver-67cbf64bc9-mtx25"] Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.894294 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:15 crc kubenswrapper[4125]: I0312 13:37:15.894393 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.027789 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"] Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.036224 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" containerName="controller-manager" containerID="cri-o://b7e1eea8a37bc6632a6efcae94187228fab0f42f894694350dfef6a577170ae5" gracePeriod=30 Mar 12 13:37:16 crc kubenswrapper[4125]: E0312 13:37:16.042531 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.843222 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-67cbf64bc9-fq4m9"] Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.843771 4125 topology_manager.go:215] "Topology Admit Handler" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" podNamespace="openshift-apiserver" podName="apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:16 crc kubenswrapper[4125]: E0312 13:37:16.843965 4125 cpu_manager.go:396] "RemoveStaleState: removing container" 
podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.843981 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" Mar 12 13:37:16 crc kubenswrapper[4125]: E0312 13:37:16.843994 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="fix-audit-permissions" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.844002 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="fix-audit-permissions" Mar 12 13:37:16 crc kubenswrapper[4125]: E0312 13:37:16.844020 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver-check-endpoints" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.844030 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver-check-endpoints" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.844129 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver-check-endpoints" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.844143 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerName="openshift-apiserver" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.844983 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.863062 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"] Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.863608 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerName="route-controller-manager" containerID="cri-o://2c8b59de86ff1d006a61977dde5615d40c7fbf6d0494d909203b682f10ec5fed" gracePeriod=30 Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.903094 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.903211 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.918389 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-r9fjc" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.963616 4125 generic.go:334] "Generic (PLEG): container finished" podID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" 
containerID="b7e1eea8a37bc6632a6efcae94187228fab0f42f894694350dfef6a577170ae5" exitCode=0 Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.963711 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" event={"ID":"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d","Type":"ContainerDied","Data":"b7e1eea8a37bc6632a6efcae94187228fab0f42f894694350dfef6a577170ae5"} Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.979094 4125 generic.go:334] "Generic (PLEG): container finished" podID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerID="9abe5af36bb650e822fc8617d763cb1ac72e09098227d9187b59e949bcd51a26" exitCode=0 Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.999438 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-encryption-config\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.999767 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:16 crc kubenswrapper[4125]: I0312 13:37:16.999959 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.000069 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.000205 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-client\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.000318 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-serving-cert\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.000514 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-config\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 
13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.000904 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxzkm\" (UniqueName: \"kubernetes.io/projected/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-kube-api-access-wxzkm\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.001001 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-node-pullsecrets\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.001055 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-image-import-ca\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.001087 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit-dir\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: E0312 13:37:17.032945 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.099535 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103580 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103676 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103702 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103747 4125 reconciler_common.go:231] "operationExecutor.MountVolume started 
for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-client\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103769 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-serving-cert\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103867 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-config\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103908 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wxzkm\" (UniqueName: \"kubernetes.io/projected/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-kube-api-access-wxzkm\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103932 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-node-pullsecrets\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.103975 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-image-import-ca\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.104219 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit-dir\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.104253 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-encryption-config\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.105041 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.106201 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-serving-ca\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.107422 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-config\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.108057 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-node-pullsecrets\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.108103 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit-dir\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.108634 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-image-import-ca\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.109083 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-trusted-ca-bundle\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.123082 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-serving-cert\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.146775 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-client\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.151377 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-encryption-config\") pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.341711 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxzkm\" (UniqueName: \"kubernetes.io/projected/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-kube-api-access-wxzkm\") 
pod \"apiserver-67cbf64bc9-fq4m9\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") " pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.357924 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-7cbd5666ff-bbfrf"] Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.361229 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-67cbf64bc9-fq4m9"] Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.712928 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-7dc8587b5-4h2pb"] Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.713087 4125 topology_manager.go:215] "Topology Admit Handler" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" podNamespace="openshift-image-registry" podName="image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.713736 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.792673 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-7dc8587b5-4h2pb"] Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.926666 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd34258c-0a6c-44b0-ba64-b411ac6bad46-ca-trust-extracted\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.927108 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-certificates\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.927310 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7qlz\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-kube-api-access-m7qlz\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.927483 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd34258c-0a6c-44b0-ba64-b411ac6bad46-installation-pull-secrets\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.927643 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-bound-sa-token\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.927887 4125 
reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-tls\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.928032 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.928225 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-trusted-ca\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.935694 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.958713 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:17 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:17 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:17 crc kubenswrapper[4125]: I0312 13:37:17.958805 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.008590 4125 generic.go:334] "Generic (PLEG): container finished" podID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerID="2c8b59de86ff1d006a61977dde5615d40c7fbf6d0494d909203b682f10ec5fed" exitCode=0 Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.009021 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" event={"ID":"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b","Type":"ContainerDied","Data":"2c8b59de86ff1d006a61977dde5615d40c7fbf6d0494d909203b682f10ec5fed"} Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030610 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-trusted-ca\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030693 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd34258c-0a6c-44b0-ba64-b411ac6bad46-ca-trust-extracted\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: 
\"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030725 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-certificates\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030747 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-m7qlz\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-kube-api-access-m7qlz\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030773 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd34258c-0a6c-44b0-ba64-b411ac6bad46-installation-pull-secrets\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030852 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-bound-sa-token\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.030894 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-tls\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.040081 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-trusted-ca\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.040540 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd34258c-0a6c-44b0-ba64-b411ac6bad46-ca-trust-extracted\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.052786 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd34258c-0a6c-44b0-ba64-b411ac6bad46-installation-pull-secrets\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.058013 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-certificates\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: E0312 13:37:18.059995 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.074975 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-tls\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.125022 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"35c093da-a468-44a1-8ff0-09b09268828c","Type":"ContainerStarted","Data":"9293ffca43f0a2173595f04abd3a03ea17c911b2efccdb680f070140769df4fb"} Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.137195 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7qlz\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-kube-api-access-m7qlz\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.202633 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-bound-sa-token\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.323955 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-7dc8587b5-4h2pb\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.886757 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:18 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:18 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.887175 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.939909 4125 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.956724 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.961600 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:37:18 crc kubenswrapper[4125]: I0312 13:37:18.975393 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:37:19 crc kubenswrapper[4125]: E0312 13:37:19.027737 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.074259 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.074546 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5cf7764b85-2d6rj" event={"ID":"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d","Type":"ContainerDied","Data":"a2dacaf5c58e960085a87716a4eb5a0d59a14e61143f52cdc0c6b3da77f3619f"} Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.074587 4125 scope.go:117] "RemoveContainer" containerID="b7e1eea8a37bc6632a6efcae94187228fab0f42f894694350dfef6a577170ae5" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.090000 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" event={"ID":"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b","Type":"ContainerDied","Data":"cbe3b913a7cd4f4787ddc08fe46fceff9c6d385960b1daa07a680caa8b0c7434"} Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.090103 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.172201 4125 scope.go:117] "RemoveContainer" containerID="2c8b59de86ff1d006a61977dde5615d40c7fbf6d0494d909203b682f10ec5fed" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.279021 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6f4598f687-mvg2d"] Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.280535 4125 topology_manager.go:215] "Topology Admit Handler" podUID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" podNamespace="openshift-controller-manager" podName="controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: E0312 13:37:19.281768 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerName="route-controller-manager" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.281977 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerName="route-controller-manager" Mar 12 13:37:19 crc kubenswrapper[4125]: E0312 13:37:19.282076 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" containerName="controller-manager" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.288006 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" containerName="controller-manager" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.288310 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" containerName="controller-manager" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.288408 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" containerName="route-controller-manager" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.288917 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.466113 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6f4598f687-mvg2d"] Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.541343 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-7dc8587b5-4h2pb"] Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600510 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-serving-cert\") pod \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600581 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-client-ca\") pod \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600701 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdfkb\" (UniqueName: \"kubernetes.io/projected/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-kube-api-access-zdfkb\") pod \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600740 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-config\") pod \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600774 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-serving-cert\") pod \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600803 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-client-ca\") pod \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600951 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-proxy-ca-bundles\") pod \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.600981 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-config\") pod \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\" (UID: \"c246f84b-a4cc-41ce-b8ce-76615b3fbf3b\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.601003 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2csb5\" (UniqueName: \"kubernetes.io/projected/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-kube-api-access-2csb5\") pod 
\"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\" (UID: \"8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d\") " Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.604756 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-client-ca" (OuterVolumeSpecName: "client-ca") pod "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" (UID: "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.620207 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" (UID: "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.622269 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-kube-api-access-zdfkb" (OuterVolumeSpecName: "kube-api-access-zdfkb") pod "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" (UID: "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b"). InnerVolumeSpecName "kube-api-access-zdfkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.624137 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" (UID: "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.625407 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-client-ca" (OuterVolumeSpecName: "client-ca") pod "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" (UID: "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.625669 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-config" (OuterVolumeSpecName: "config") pod "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" (UID: "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.627605 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-config" (OuterVolumeSpecName: "config") pod "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" (UID: "c246f84b-a4cc-41ce-b8ce-76615b3fbf3b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.642172 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-kube-api-access-2csb5" (OuterVolumeSpecName: "kube-api-access-2csb5") pod "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" (UID: "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d"). InnerVolumeSpecName "kube-api-access-2csb5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.648585 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" (UID: "8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703243 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-serving-cert\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703533 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-config\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703575 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-proxy-ca-bundles\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703621 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxdrp\" (UniqueName: \"kubernetes.io/projected/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-kube-api-access-pxdrp\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703652 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-client-ca\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703697 4125 reconciler_common.go:300] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703712 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703736 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2csb5\" (UniqueName: \"kubernetes.io/projected/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-kube-api-access-2csb5\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703749 4125 
reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703759 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703771 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zdfkb\" (UniqueName: \"kubernetes.io/projected/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b-kube-api-access-zdfkb\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703781 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703791 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.703804 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.789945 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-67cbf64bc9-fq4m9"] Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.808304 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"] Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.819858 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-serving-cert\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.819942 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-config\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.819990 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-proxy-ca-bundles\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.820046 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pxdrp\" (UniqueName: \"kubernetes.io/projected/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-kube-api-access-pxdrp\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 
12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.820083 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-client-ca\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.821042 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-client-ca\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.822235 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-proxy-ca-bundles\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.822409 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-config\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.830693 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5cf7764b85-2d6rj"]
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.835423 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-765b47f944-n2lhl"]
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.861026 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-serving-cert\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.887026 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:19 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:19 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.887112 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.906033 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"]
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.917567 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-697fdcccc7-vzksx"]
Mar 12 13:37:19 crc kubenswrapper[4125]: I0312 13:37:19.925584 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxdrp\" (UniqueName: \"kubernetes.io/projected/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-kube-api-access-pxdrp\") pod \"controller-manager-6f4598f687-mvg2d\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.042782 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d" path="/var/lib/kubelet/pods/8f1b4f0e-8190-4b76-a04b-0dfd9d4b1a5d/volumes"
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.043609 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c246f84b-a4cc-41ce-b8ce-76615b3fbf3b" path="/var/lib/kubelet/pods/c246f84b-a4cc-41ce-b8ce-76615b3fbf3b/volumes"
Mar 12 13:37:20 crc kubenswrapper[4125]: E0312 13:37:20.061255 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.106237 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerStarted","Data":"c75f0aaf1ed5d02ba790f7f91b5f1610e4d4bde81ce94ac51410108751348e85"}
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.113660 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"ac3dfa4ed7931e5462fcdd7627e4282ba7aa0fd1c33cc8f485d6bfc2ea90ad2b"}
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.115484 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.115598 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.115657 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.133489 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" event={"ID":"fd34258c-0a6c-44b0-ba64-b411ac6bad46","Type":"ContainerStarted","Data":"4061b35f86b8b4e424f10f00863af2ce515a25eb291fe51e62419db7da9538e0"}
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.212054 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.887125 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:20 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:20 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:20 crc kubenswrapper[4125]: I0312 13:37:20.888083 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:21 crc kubenswrapper[4125]: E0312 13:37:21.031464 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.181099 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"35c093da-a468-44a1-8ff0-09b09268828c","Type":"ContainerStarted","Data":"a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d"}
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.194800 4125 generic.go:334] "Generic (PLEG): container finished" podID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerID="f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532" exitCode=0
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.195117 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerDied","Data":"f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532"}
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.206914 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" event={"ID":"fd34258c-0a6c-44b0-ba64-b411ac6bad46","Type":"ContainerStarted","Data":"9c2dc36f0c3ee44e1f4c7d09861313a36668b4d06de91c2b5855b11f060bbb11"}
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.207083 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.207934 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.208013 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.237438 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"]
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.237626 4125 topology_manager.go:215] "Topology Admit Handler" podUID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.238475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.240676 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.246721 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.247969 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.250391 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.253722 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.261234 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=7.261189594 podStartE2EDuration="7.261189594s" podCreationTimestamp="2026-03-12 13:37:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:21.256247229 +0000 UTC m=+1011.579633399" watchObservedRunningTime="2026-03-12 13:37:21.261189594 +0000 UTC m=+1011.584575703"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.295542 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"]
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.302959 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.350040 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjrsx\" (UniqueName: \"kubernetes.io/projected/ab05ab4a-e996-434d-9d75-49e67b6f8abf-kube-api-access-qjrsx\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.350373 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-client-ca\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.350584 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-config\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.350693 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab05ab4a-e996-434d-9d75-49e67b6f8abf-serving-cert\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.416915 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" podStartSLOduration=4.416787493 podStartE2EDuration="4.416787493s" podCreationTimestamp="2026-03-12 13:37:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:21.415702776 +0000 UTC m=+1011.739088715" watchObservedRunningTime="2026-03-12 13:37:21.416787493 +0000 UTC m=+1011.740173552"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.454499 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qjrsx\" (UniqueName: \"kubernetes.io/projected/ab05ab4a-e996-434d-9d75-49e67b6f8abf-kube-api-access-qjrsx\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.454582 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-client-ca\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.454621 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-config\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.454717 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab05ab4a-e996-434d-9d75-49e67b6f8abf-serving-cert\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.459686 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-config\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.460538 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-client-ca\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.491889 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab05ab4a-e996-434d-9d75-49e67b6f8abf-serving-cert\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.520347 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjrsx\" (UniqueName: \"kubernetes.io/projected/ab05ab4a-e996-434d-9d75-49e67b6f8abf-kube-api-access-qjrsx\") pod \"route-controller-manager-5c8556d9cf-6l4wg\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.560656 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.572075 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6f4598f687-mvg2d"]
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.888035 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:21 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:21 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:21 crc kubenswrapper[4125]: I0312 13:37:21.888208 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:22 crc kubenswrapper[4125]: I0312 13:37:22.227789 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" event={"ID":"8e7580cc-d14e-4069-ac2e-3bc1e5582fae","Type":"ContainerStarted","Data":"80578baf9c99f7959f33fbb42d1680b74475f7b8ad9ab7e8a2a9124c547fa286"}
Mar 12 13:37:22 crc kubenswrapper[4125]: I0312 13:37:22.228389 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:37:22 crc kubenswrapper[4125]: I0312 13:37:22.228448 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:37:22 crc kubenswrapper[4125]: I0312 13:37:22.834351 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"]
Mar 12 13:37:22 crc kubenswrapper[4125]: W0312 13:37:22.866966 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab05ab4a_e996_434d_9d75_49e67b6f8abf.slice/crio-a1d2f27f431f642368b15cff624b05dd9ae578c9e0f521e03312ab7873e45569 WatchSource:0}: Error finding container a1d2f27f431f642368b15cff624b05dd9ae578c9e0f521e03312ab7873e45569: Status 404 returned error can't find the container with id a1d2f27f431f642368b15cff624b05dd9ae578c9e0f521e03312ab7873e45569
Mar 12 13:37:22 crc kubenswrapper[4125]: I0312 13:37:22.902958 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:22 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:22 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:22 crc kubenswrapper[4125]: I0312 13:37:22.903015 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:23 crc kubenswrapper[4125]: E0312 13:37:23.036070 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.239706 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerStarted","Data":"7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a"}
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.243044 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" event={"ID":"8e7580cc-d14e-4069-ac2e-3bc1e5582fae","Type":"ContainerStarted","Data":"249a2a8171d8bdd35e3ae42b4c0fcd4f4755976f998a414d0940b501fafbf666"}
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.244495 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" event={"ID":"ab05ab4a-e996-434d-9d75-49e67b6f8abf","Type":"ContainerStarted","Data":"a1d2f27f431f642368b15cff624b05dd9ae578c9e0f521e03312ab7873e45569"}
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.268707 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-console/console-8568c59db8-fspjn"]
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.268883 4125 topology_manager.go:215] "Topology Admit Handler" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" podNamespace="openshift-console" podName="console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.269605 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.272279 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-ng44q"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.321477 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8568c59db8-fspjn"]
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.405974 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.406334 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.406417 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.406450 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.406487 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.406514 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.406562 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.413904 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" podStartSLOduration=7.413799441 podStartE2EDuration="7.413799441s" podCreationTimestamp="2026-03-12 13:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:23.405646646 +0000 UTC m=+1013.729032495" watchObservedRunningTime="2026-03-12 13:37:23.413799441 +0000 UTC m=+1013.737185450"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.508053 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.508127 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.508203 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.509473 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.509585 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.510925 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.511497 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.511600 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.511494 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.512390 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.516686 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.541617 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.550923 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.556207 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.596462 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.715343 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.715446 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.715451 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.715540 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.848207 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.848323 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.889003 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:23 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:23 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.889084 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.946258 4125 patch_prober.go:28] interesting pod/authentication-operator-7cc7ff75d5-g9qv8 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": EOF" start-of-body=
Mar 12 13:37:23 crc kubenswrapper[4125]: I0312 13:37:23.946363 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": EOF"
Mar 12 13:37:24 crc kubenswrapper[4125]: E0312 13:37:24.039091 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482"
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.257522 4125 generic.go:334] "Generic (PLEG): container finished" podID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" containerID="9352ce646adc0ab0c2ea7437c967dcebf4f8a1d8d5617ae7b2529767b9a18575" exitCode=0
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.257631 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" event={"ID":"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e","Type":"ContainerDied","Data":"9352ce646adc0ab0c2ea7437c967dcebf4f8a1d8d5617ae7b2529767b9a18575"}
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.258279 4125 scope.go:117] "RemoveContainer" containerID="9352ce646adc0ab0c2ea7437c967dcebf4f8a1d8d5617ae7b2529767b9a18575"
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.265738 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.281502 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d"
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.886636 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:24 crc kubenswrapper[4125]: I0312 13:37:24.887256 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.116053 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8568c59db8-fspjn"]
Mar 12 13:37:25 crc kubenswrapper[4125]: W0312 13:37:25.152327 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb223d47_9acc_4b0d_b8e8_39d4d9f3e4c4.slice/crio-26e0cd516e5fd4b2758683d44bd7c09d19653725714d2864f48199af64540503 WatchSource:0}: Error finding container 26e0cd516e5fd4b2758683d44bd7c09d19653725714d2864f48199af64540503: Status 404 returned error can't find the container with id 26e0cd516e5fd4b2758683d44bd7c09d19653725714d2864f48199af64540503
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.277105 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" event={"ID":"ab05ab4a-e996-434d-9d75-49e67b6f8abf","Type":"ContainerStarted","Data":"0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8"}
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.277727 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.287086 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerStarted","Data":"331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f"}
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.294609 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" event={"ID":"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e","Type":"ContainerStarted","Data":"9a5d225c8e20fda3ecc4480ced29355b936e2fc341c0cd32b5652a7179f25af0"}
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.297891 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.302757 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8568c59db8-fspjn" event={"ID":"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4","Type":"ContainerStarted","Data":"26e0cd516e5fd4b2758683d44bd7c09d19653725714d2864f48199af64540503"}
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.460268 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" podStartSLOduration=9.460221477 podStartE2EDuration="9.460221477s" podCreationTimestamp="2026-03-12 13:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:25.388592176 +0000 UTC m=+1015.711978325" watchObservedRunningTime="2026-03-12 13:37:25.460221477 +0000 UTC m=+1015.783607386"
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.890723 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:25 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:25 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:25 crc kubenswrapper[4125]: I0312 13:37:25.890891 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:26 crc kubenswrapper[4125]: I0312 13:37:26.888758 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:26 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:26 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:26 crc kubenswrapper[4125]: I0312 13:37:26.889310 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:26 crc kubenswrapper[4125]: I0312 13:37:26.960754 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podStartSLOduration=10.960706524999999 podStartE2EDuration="10.960706525s" podCreationTimestamp="2026-03-12 13:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:25.669481522 +0000 UTC m=+1015.992867781" watchObservedRunningTime="2026-03-12 13:37:26.960706525 +0000 UTC m=+1017.284092424"
Mar 12 13:37:26 crc kubenswrapper[4125]: I0312 13:37:26.964405 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-86594ff457-6b77x"]
Mar 12 13:37:26 crc kubenswrapper[4125]: I0312 13:37:26.983303 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.266533 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-6fbd648f87-j4bk5"]
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.266907 4125 topology_manager.go:215] "Topology Admit Handler" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" podNamespace="openshift-image-registry" podName="image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.267596 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.322268 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8568c59db8-fspjn" event={"ID":"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4","Type":"ContainerStarted","Data":"61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1"}
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.334842 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-6fbd648f87-j4bk5"]
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.337218 4125 generic.go:334] "Generic (PLEG): container finished" podID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" containerID="5c274f02173d251858e04c2e1bef340f340e73ad687e5608c9f7f08857824fa2" exitCode=0
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.337292 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerDied","Data":"5c274f02173d251858e04c2e1bef340f340e73ad687e5608c9f7f08857824fa2"}
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.337861 4125 scope.go:117] "RemoveContainer" containerID="5c274f02173d251858e04c2e1bef340f340e73ad687e5608c9f7f08857824fa2"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.360201 4125 generic.go:334] "Generic (PLEG): container finished" podID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" containerID="2da14cea2e9328cb16f7e4d671c9f21a6d2615667035cc26a4e4d0f634f80b82" exitCode=0
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.360320 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" event={"ID":"43ae1c37-047b-4ee2-9fee-41e337dd4ac8","Type":"ContainerDied","Data":"2da14cea2e9328cb16f7e4d671c9f21a6d2615667035cc26a4e4d0f634f80b82"}
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.361042 4125 scope.go:117] "RemoveContainer" containerID="2da14cea2e9328cb16f7e4d671c9f21a6d2615667035cc26a4e4d0f634f80b82"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.457331 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.460910 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.461028 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.461184 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.461370 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.461453 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.461482 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.461622 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.562656 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.562788 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.562860 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.562895 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.562927 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.562980 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.563001 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.564309 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.565560 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.567361 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.586793 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.595774 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-console/console-8568c59db8-fspjn" podStartSLOduration=4.595533479 podStartE2EDuration="4.595533479s" podCreationTimestamp="2026-03-12 13:37:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:27.587125283 +0000 UTC m=+1017.910511212" watchObservedRunningTime="2026-03-12 13:37:27.595533479 +0000 UTC m=+1017.918919368"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.614015 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.661260 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.706558 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.886941 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:27 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:27 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.887310 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:27 crc kubenswrapper[4125]: I0312 13:37:27.902417 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:28 crc kubenswrapper[4125]: I0312 13:37:28.192684 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:28 crc kubenswrapper[4125]: I0312 13:37:28.885901 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:28 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:28 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:28 crc kubenswrapper[4125]: I0312 13:37:28.886020 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:28 crc kubenswrapper[4125]: I0312 13:37:28.941372 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9"
Mar 12 13:37:28 crc kubenswrapper[4125]: I0312 13:37:28.941484 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9"
Mar 12 13:37:29 crc kubenswrapper[4125]: E0312 13:37:29.036071 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:37:29 crc kubenswrapper[4125]: I0312 13:37:29.073682 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-fq4m9 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Mar 12 13:37:29 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Mar 12 13:37:29 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:29 crc kubenswrapper[4125]: I0312 13:37:29.073914 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:29 crc kubenswrapper[4125]: I0312 13:37:29.888474 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:29 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:29 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:29 crc kubenswrapper[4125]: I0312 13:37:29.889081 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:30 crc kubenswrapper[4125]: I0312 13:37:30.906324 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:30 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:30 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:30 crc kubenswrapper[4125]: I0312 13:37:30.906652 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:31 crc kubenswrapper[4125]: E0312 13:37:31.034641 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c"
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.431108 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.431644 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.431693 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.431759 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.431801 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.495756 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" event={"ID":"43ae1c37-047b-4ee2-9fee-41e337dd4ac8","Type":"ContainerStarted","Data":"b9c6cfbde7f0bf041540c0fd362582e7f949994201c9106e67327b0db098a92b"}
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.521915 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"9b5afeab4859b7029fc4b5a128c7340410d21e58df32e94508ec85146f811359"}
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.891173 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:31 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:31 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:31 crc kubenswrapper[4125]: I0312 13:37:31.891328 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:32 crc kubenswrapper[4125]: E0312 13:37:32.030301 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 12 13:37:32 crc kubenswrapper[4125]: I0312 13:37:32.214498 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-6fbd648f87-j4bk5"]
Mar 12 13:37:32 crc kubenswrapper[4125]: I0312 13:37:32.542340 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" event={"ID":"5cad292d-912c-4787-a5fa-0ade98e731eb","Type":"ContainerStarted","Data":"95363646b7bacc0b12e9cc648925587d1ce416f8446344858795fb644c78b39f"}
Mar 12 13:37:32 crc kubenswrapper[4125]: I0312 13:37:32.676921 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6f4598f687-mvg2d"]
Mar 12 13:37:32 crc kubenswrapper[4125]: I0312 13:37:32.677199 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" podUID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" containerName="controller-manager" containerID="cri-o://249a2a8171d8bdd35e3ae42b4c0fcd4f4755976f998a414d0940b501fafbf666" gracePeriod=30
Mar 12 13:37:32 crc kubenswrapper[4125]: I0312 13:37:32.889787 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:32 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:32 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:32 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:32 crc kubenswrapper[4125]: I0312 13:37:32.890085 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:33 crc kubenswrapper[4125]: E0312 13:37:33.030093 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.298413 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"]
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.310024 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" podUID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" containerName="route-controller-manager" containerID="cri-o://0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8" gracePeriod=30
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.560396 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" event={"ID":"5cad292d-912c-4787-a5fa-0ade98e731eb","Type":"ContainerStarted","Data":"11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f"}
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.560669 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.568078 4125 generic.go:334] "Generic (PLEG): container finished" podID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" containerID="249a2a8171d8bdd35e3ae42b4c0fcd4f4755976f998a414d0940b501fafbf666" exitCode=0
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.568136 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" event={"ID":"8e7580cc-d14e-4069-ac2e-3bc1e5582fae","Type":"ContainerDied","Data":"249a2a8171d8bdd35e3ae42b4c0fcd4f4755976f998a414d0940b501fafbf666"}
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.599773 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.600501 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-8568c59db8-fspjn"
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.603700 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body=
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.603878 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused"
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.713369 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.713447 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx"
podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.713926 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.714003 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.851498 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.851591 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.925123 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:33 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.925308 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.969001 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-fq4m9 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]log ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]etcd ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok Mar 12 13:37:33 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok Mar 12 13:37:33 crc 
kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok Mar 12 13:37:33 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Mar 12 13:37:33 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:33 crc kubenswrapper[4125]: I0312 13:37:33.969097 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:34 crc kubenswrapper[4125]: E0312 13:37:34.028279 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.229792 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.302959 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podStartSLOduration=7.302909055 podStartE2EDuration="7.302909055s" podCreationTimestamp="2026-03-12 13:37:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:33.642273616 +0000 UTC m=+1023.965659805" watchObservedRunningTime="2026-03-12 13:37:34.302909055 +0000 UTC m=+1024.626295034" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.352413 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab05ab4a-e996-434d-9d75-49e67b6f8abf-serving-cert\") pod \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.352489 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-config\") pod \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.352599 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjrsx\" (UniqueName: \"kubernetes.io/projected/ab05ab4a-e996-434d-9d75-49e67b6f8abf-kube-api-access-qjrsx\") pod \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.352641 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-client-ca\") pod \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\" (UID: \"ab05ab4a-e996-434d-9d75-49e67b6f8abf\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.353449 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-client-ca" (OuterVolumeSpecName: "client-ca") pod "ab05ab4a-e996-434d-9d75-49e67b6f8abf" (UID: "ab05ab4a-e996-434d-9d75-49e67b6f8abf"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.353469 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-config" (OuterVolumeSpecName: "config") pod "ab05ab4a-e996-434d-9d75-49e67b6f8abf" (UID: "ab05ab4a-e996-434d-9d75-49e67b6f8abf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.359860 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab05ab4a-e996-434d-9d75-49e67b6f8abf-kube-api-access-qjrsx" (OuterVolumeSpecName: "kube-api-access-qjrsx") pod "ab05ab4a-e996-434d-9d75-49e67b6f8abf" (UID: "ab05ab4a-e996-434d-9d75-49e67b6f8abf"). InnerVolumeSpecName "kube-api-access-qjrsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.360261 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab05ab4a-e996-434d-9d75-49e67b6f8abf-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ab05ab4a-e996-434d-9d75-49e67b6f8abf" (UID: "ab05ab4a-e996-434d-9d75-49e67b6f8abf"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.454145 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab05ab4a-e996-434d-9d75-49e67b6f8abf-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.454234 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.454252 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-qjrsx\" (UniqueName: \"kubernetes.io/projected/ab05ab4a-e996-434d-9d75-49e67b6f8abf-kube-api-access-qjrsx\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.454264 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ab05ab4a-e996-434d-9d75-49e67b6f8abf-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.575081 4125 generic.go:334] "Generic (PLEG): container finished" podID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" containerID="0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8" exitCode=0 Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.575197 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" event={"ID":"ab05ab4a-e996-434d-9d75-49e67b6f8abf","Type":"ContainerDied","Data":"0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8"} Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.575228 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" 
event={"ID":"ab05ab4a-e996-434d-9d75-49e67b6f8abf","Type":"ContainerDied","Data":"a1d2f27f431f642368b15cff624b05dd9ae578c9e0f521e03312ab7873e45569"} Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.575260 4125 scope.go:117] "RemoveContainer" containerID="0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.575368 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.580897 4125 generic.go:334] "Generic (PLEG): container finished" podID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" containerID="bf3cc5d384c697ede552b2e7310ef4955e8ac25586b981a98c7f89ae2248f131" exitCode=0 Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.581012 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" event={"ID":"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf","Type":"ContainerDied","Data":"bf3cc5d384c697ede552b2e7310ef4955e8ac25586b981a98c7f89ae2248f131"} Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.581538 4125 scope.go:117] "RemoveContainer" containerID="bf3cc5d384c697ede552b2e7310ef4955e8ac25586b981a98c7f89ae2248f131" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.594350 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" event={"ID":"8e7580cc-d14e-4069-ac2e-3bc1e5582fae","Type":"ContainerDied","Data":"80578baf9c99f7959f33fbb42d1680b74475f7b8ad9ab7e8a2a9124c547fa286"} Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.594411 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80578baf9c99f7959f33fbb42d1680b74475f7b8ad9ab7e8a2a9124c547fa286" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.617727 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.765115 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-serving-cert\") pod \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.765405 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-config\") pod \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.765467 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-proxy-ca-bundles\") pod \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.765549 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-client-ca\") pod \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.765600 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxdrp\" (UniqueName: \"kubernetes.io/projected/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-kube-api-access-pxdrp\") pod \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\" (UID: \"8e7580cc-d14e-4069-ac2e-3bc1e5582fae\") " Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.772097 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8e7580cc-d14e-4069-ac2e-3bc1e5582fae" (UID: "8e7580cc-d14e-4069-ac2e-3bc1e5582fae"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.772735 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-client-ca" (OuterVolumeSpecName: "client-ca") pod "8e7580cc-d14e-4069-ac2e-3bc1e5582fae" (UID: "8e7580cc-d14e-4069-ac2e-3bc1e5582fae"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.779637 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-config" (OuterVolumeSpecName: "config") pod "8e7580cc-d14e-4069-ac2e-3bc1e5582fae" (UID: "8e7580cc-d14e-4069-ac2e-3bc1e5582fae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.845758 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8e7580cc-d14e-4069-ac2e-3bc1e5582fae" (UID: "8e7580cc-d14e-4069-ac2e-3bc1e5582fae"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.849542 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-kube-api-access-pxdrp" (OuterVolumeSpecName: "kube-api-access-pxdrp") pod "8e7580cc-d14e-4069-ac2e-3bc1e5582fae" (UID: "8e7580cc-d14e-4069-ac2e-3bc1e5582fae"). InnerVolumeSpecName "kube-api-access-pxdrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.860580 4125 scope.go:117] "RemoveContainer" containerID="0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8" Mar 12 13:37:34 crc kubenswrapper[4125]: E0312 13:37:34.870135 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8\": container with ID starting with 0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8 not found: ID does not exist" containerID="0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.870235 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8"} err="failed to get container status \"0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8\": rpc error: code = NotFound desc = could not find container \"0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8\": container with ID starting with 0f5737fda4e4b227cbedd1ee6a1a4edf7c6cfd47c285f3be6eee48b65f44b8f8 not found: ID does not exist" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.872792 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.873258 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.873403 4125 reconciler_common.go:300] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.873418 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.873435 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-pxdrp\" (UniqueName: \"kubernetes.io/projected/8e7580cc-d14e-4069-ac2e-3bc1e5582fae-kube-api-access-pxdrp\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.897139 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:34 crc kubenswrapper[4125]: [+]process-running ok Mar 12 
13:37:34 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:34 crc kubenswrapper[4125]: I0312 13:37:34.897331 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.301001 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9"] Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.301206 4125 topology_manager.go:215] "Topology Admit Handler" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: E0312 13:37:35.301401 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" containerName="route-controller-manager" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.301417 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" containerName="route-controller-manager" Mar 12 13:37:35 crc kubenswrapper[4125]: E0312 13:37:35.301437 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" containerName="controller-manager" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.301447 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" containerName="controller-manager" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.301582 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" containerName="route-controller-manager" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.301599 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" containerName="controller-manager" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.302043 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.339049 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.342195 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.342454 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.348543 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.348762 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.350007 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.453525 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9"] Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.461496 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"] Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.498885 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5c8556d9cf-6l4wg"] Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.499952 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-client-ca\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.500334 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-serving-cert\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.500510 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58mvf\" (UniqueName: \"kubernetes.io/projected/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-kube-api-access-58mvf\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.500794 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-config\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: 
\"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.602135 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-58mvf\" (UniqueName: \"kubernetes.io/projected/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-kube-api-access-58mvf\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.602370 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-config\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.602422 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-client-ca\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.602448 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-serving-cert\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.602910 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-686c6c748c-qbnnr_9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7/kube-storage-version-migrator-operator/0.log" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.602994 4125 generic.go:334] "Generic (PLEG): container finished" podID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" containerID="690c3e41c88de001213b70a1c67447cbdf7b536c279ebd273a32e03268f91192" exitCode=1 Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.603179 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" event={"ID":"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7","Type":"ContainerDied","Data":"690c3e41c88de001213b70a1c67447cbdf7b536c279ebd273a32e03268f91192"} Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.603995 4125 scope.go:117] "RemoveContainer" containerID="690c3e41c88de001213b70a1c67447cbdf7b536c279ebd273a32e03268f91192" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.604876 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-config\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.606642 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-client-ca\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.608482 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6f4598f687-mvg2d" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.651495 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-serving-cert\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.767558 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-58mvf\" (UniqueName: \"kubernetes.io/projected/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-kube-api-access-58mvf\") pod \"route-controller-manager-584c5db66f-kcmc9\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.886757 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:35 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:35 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.886960 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:35 crc kubenswrapper[4125]: I0312 13:37:35.929895 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:36 crc kubenswrapper[4125]: E0312 13:37:36.031443 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" Mar 12 13:37:36 crc kubenswrapper[4125]: E0312 13:37:36.031630 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" Mar 12 13:37:36 crc kubenswrapper[4125]: I0312 13:37:36.052244 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab05ab4a-e996-434d-9d75-49e67b6f8abf" path="/var/lib/kubelet/pods/ab05ab4a-e996-434d-9d75-49e67b6f8abf/volumes" Mar 12 13:37:36 crc kubenswrapper[4125]: I0312 13:37:36.731051 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6f4598f687-mvg2d"] Mar 12 13:37:36 crc kubenswrapper[4125]: I0312 13:37:36.751560 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6f4598f687-mvg2d"] Mar 12 13:37:36 crc kubenswrapper[4125]: I0312 13:37:36.889249 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:36 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:36 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:36 crc kubenswrapper[4125]: I0312 13:37:36.890135 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.240632 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7559d9b74c-lxhxw"] Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.241023 4125 topology_manager.go:215] "Topology Admit Handler" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" podNamespace="openshift-controller-manager" podName="controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.241745 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.254451 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.254624 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.259674 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.259959 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.260187 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-58g82" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.261788 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.262888 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-config\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.263092 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvxcs\" (UniqueName: \"kubernetes.io/projected/4e18989a-5a3c-4b45-8821-4b91287eaf1e-kube-api-access-fvxcs\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.263248 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-client-ca\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.263285 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-proxy-ca-bundles\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.263390 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e18989a-5a3c-4b45-8821-4b91287eaf1e-serving-cert\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.278252 4125 reflector.go:351] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager"/"openshift-global-ca" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.353057 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7559d9b74c-lxhxw"] Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.365634 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-client-ca\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.367069 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-proxy-ca-bundles\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.367466 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e18989a-5a3c-4b45-8821-4b91287eaf1e-serving-cert\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.367691 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-config\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.367757 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fvxcs\" (UniqueName: \"kubernetes.io/projected/4e18989a-5a3c-4b45-8821-4b91287eaf1e-kube-api-access-fvxcs\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.368067 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-client-ca\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.369891 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-config\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.381568 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-proxy-ca-bundles\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " 
pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.430715 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e18989a-5a3c-4b45-8821-4b91287eaf1e-serving-cert\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.449997 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvxcs\" (UniqueName: \"kubernetes.io/projected/4e18989a-5a3c-4b45-8821-4b91287eaf1e-kube-api-access-fvxcs\") pod \"controller-manager-7559d9b74c-lxhxw\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.582310 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.639726 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9"] Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.659141 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/installer-7-crc"] Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.659300 4125 topology_manager.go:215] "Topology Admit Handler" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" podNamespace="openshift-kube-scheduler" podName="installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.660255 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.670296 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler"/"kube-root-ca.crt" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.670556 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler"/"installer-sa-dockercfg-9ln8g" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.678077 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" event={"ID":"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf","Type":"ContainerStarted","Data":"593aacabc70ebe73983ec5939ded125b308cf49e6217d48e00aa691dc424737b"} Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.688141 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-686c6c748c-qbnnr_9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7/kube-storage-version-migrator-operator/0.log" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.688334 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" event={"ID":"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7","Type":"ContainerStarted","Data":"3f1fdef3a940c729422da41873d6e80634d146ef15a783470690bdf2cb2ee1f2"} Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.691450 4125 generic.go:334] "Generic (PLEG): container finished" podID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" containerID="01904e9caa36dbc7772b537a148f3270c1b6a855aab806556aac5544f9540dc2" exitCode=0 Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.691498 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" event={"ID":"0b5c38ff-1fa8-4219-994d-15776acd4a4d","Type":"ContainerDied","Data":"01904e9caa36dbc7772b537a148f3270c1b6a855aab806556aac5544f9540dc2"} Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.692227 4125 scope.go:117] "RemoveContainer" containerID="01904e9caa36dbc7772b537a148f3270c1b6a855aab806556aac5544f9540dc2" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.712214 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/installer-7-crc"] Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.782208 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba116478-01f2-47d9-8b88-9db94f1478e3-kube-api-access\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.782354 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-kubelet-dir\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.782417 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-var-lock\") pod \"installer-7-crc\" (UID: 
\"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.887915 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba116478-01f2-47d9-8b88-9db94f1478e3-kube-api-access\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.888023 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-kubelet-dir\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.888087 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-var-lock\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.888228 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-var-lock\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.888283 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-kubelet-dir\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.892326 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:37 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:37 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.892446 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:37 crc kubenswrapper[4125]: I0312 13:37:37.984596 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba116478-01f2-47d9-8b88-9db94f1478e3-kube-api-access\") pod \"installer-7-crc\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") " pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:38 crc kubenswrapper[4125]: I0312 13:37:38.012987 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:37:38 crc kubenswrapper[4125]: I0312 13:37:38.092006 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e7580cc-d14e-4069-ac2e-3bc1e5582fae" path="/var/lib/kubelet/pods/8e7580cc-d14e-4069-ac2e-3bc1e5582fae/volumes" Mar 12 13:37:38 crc kubenswrapper[4125]: I0312 13:37:38.889916 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:38 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:38 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:38 crc kubenswrapper[4125]: I0312 13:37:38.891600 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:38 crc kubenswrapper[4125]: I0312 13:37:38.961286 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:39 crc kubenswrapper[4125]: I0312 13:37:39.020595 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:39 crc kubenswrapper[4125]: I0312 13:37:39.035041 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:37:39 crc kubenswrapper[4125]: I0312 13:37:39.067954 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" event={"ID":"dd2f98e4-4656-4c95-8c6f-5959bd9f876a","Type":"ContainerStarted","Data":"9a4668c1cadfef1e1cfb5c60fd3d67f78983fa21555193a1cb4c00a4ae2f6ecc"} Mar 12 13:37:39 crc kubenswrapper[4125]: I0312 13:37:39.897063 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:39 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:39 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:39 crc kubenswrapper[4125]: I0312 13:37:39.897489 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:40 crc kubenswrapper[4125]: I0312 13:37:40.901536 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:40 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:40 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:40 crc kubenswrapper[4125]: I0312 13:37:40.902378 4125 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:41 crc kubenswrapper[4125]: I0312 13:37:41.905289 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:41 crc kubenswrapper[4125]: I0312 13:37:41.925258 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:42 crc kubenswrapper[4125]: I0312 13:37:42.897467 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:42 crc kubenswrapper[4125]: I0312 13:37:42.897873 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.305989 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" event={"ID":"dd2f98e4-4656-4c95-8c6f-5959bd9f876a","Type":"ContainerStarted","Data":"e5e4ae2e28c6d0c65895c9e36ea602043bbd8bda8e4978a7f740eb0d6f142453"} Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.307681 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.328450 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" start-of-body= Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.328551 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.423616 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerName="registry" 
containerID="cri-o://1e2fec87605c0f6f17be5fe9f57263de9e9c959c83ac27c370eb1927d6b59791" gracePeriod=30 Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.423891 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podStartSLOduration=10.423785204 podStartE2EDuration="10.423785204s" podCreationTimestamp="2026-03-12 13:37:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:43.415377659 +0000 UTC m=+1033.738763798" watchObservedRunningTime="2026-03-12 13:37:43.423785204 +0000 UTC m=+1033.747171113" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.598594 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.598688 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.651656 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7559d9b74c-lxhxw"] Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.713783 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.713887 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.714018 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.714275 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.714360 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.720761 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 
13:37:43.720897 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.727517 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"ac3dfa4ed7931e5462fcdd7627e4282ba7aa0fd1c33cc8f485d6bfc2ea90ad2b"} pod="openshift-console/downloads-65476884b9-9wcvx" containerMessage="Container download-server failed liveness probe, will be restarted" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.727602 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" containerID="cri-o://ac3dfa4ed7931e5462fcdd7627e4282ba7aa0fd1c33cc8f485d6bfc2ea90ad2b" gracePeriod=2 Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.851462 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.851556 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.890443 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:43 crc kubenswrapper[4125]: I0312 13:37:43.890519 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:44 crc kubenswrapper[4125]: E0312 13:37:44.039752 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" Mar 12 13:37:44 crc kubenswrapper[4125]: E0312 13:37:44.054060 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.16\\\"\"" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.213968 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" 
file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.214730 4125 dynamic_cafile_content.go:119] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.391709 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.405598 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" event={"ID":"0b5c38ff-1fa8-4219-994d-15776acd4a4d","Type":"ContainerStarted","Data":"73004d96ae50972445d8337bf850d2634dd12588ebd02fa1a6b599770000c3b3"} Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.432305 4125 generic.go:334] "Generic (PLEG): container finished" podID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerID="1e2fec87605c0f6f17be5fe9f57263de9e9c959c83ac27c370eb1927d6b59791" exitCode=0 Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.432385 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" event={"ID":"42b6a393-6194-4620-bf8f-7e4b6cbe5679","Type":"ContainerDied","Data":"1e2fec87605c0f6f17be5fe9f57263de9e9c959c83ac27c370eb1927d6b59791"} Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.447711 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" event={"ID":"4e18989a-5a3c-4b45-8821-4b91287eaf1e","Type":"ContainerStarted","Data":"f23e82f3c0b766599813e9145ee071e0b4440155ed5f6c21aaf40d5562190d4e"} Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.889898 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:44 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:44 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.890398 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.921397 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 12 13:37:44 crc kubenswrapper[4125]: I0312 13:37:44.955634 4125 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","poddc41379b-41a8-497f-8ac6-4ee19454d1d2"] err="unable to destroy cgroup paths for cgroup [kubepods poddc41379b-41a8-497f-8ac6-4ee19454d1d2] : Timed out while waiting for systemd to remove kubepods-poddc41379b_41a8_497f_8ac6_4ee19454d1d2.slice" Mar 12 13:37:44 crc kubenswrapper[4125]: E0312 13:37:44.955725 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods 
poddc41379b-41a8-497f-8ac6-4ee19454d1d2] : unable to destroy cgroup paths for cgroup [kubepods poddc41379b-41a8-497f-8ac6-4ee19454d1d2] : Timed out while waiting for systemd to remove kubepods-poddc41379b_41a8_497f_8ac6_4ee19454d1d2.slice" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podUID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.038716 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/installer-7-crc"] Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.145990 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" containerID="cri-o://586e67cdf44721522bdaa46be7afee74d9457d02a4b95231987aca3f6a4df542" gracePeriod=15 Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.302264 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.893271 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:45 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.893366 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.896360 4125 generic.go:334] "Generic (PLEG): container finished" podID="6268b7fe-8910-4505-b404-6f1df638105c" containerID="ac3dfa4ed7931e5462fcdd7627e4282ba7aa0fd1c33cc8f485d6bfc2ea90ad2b" exitCode=0 Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.896456 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerDied","Data":"ac3dfa4ed7931e5462fcdd7627e4282ba7aa0fd1c33cc8f485d6bfc2ea90ad2b"} Mar 12 13:37:45 crc kubenswrapper[4125]: I0312 13:37:45.896492 4125 scope.go:117] "RemoveContainer" containerID="44ab1d97ab605ba35233246ae3683b740f13717f9a0e595713bb0d587b972519" Mar 12 13:37:46 crc kubenswrapper[4125]: I0312 13:37:46.879126 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-7-crc" event={"ID":"ba116478-01f2-47d9-8b88-9db94f1478e3","Type":"ContainerStarted","Data":"c9bc6e6741a6359131ee7e1dcdf9c466f61a3de0e373186c1523b015b1ed4390"} Mar 12 13:37:46 crc kubenswrapper[4125]: I0312 13:37:46.879511 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Mar 12 13:37:46 crc kubenswrapper[4125]: I0312 13:37:46.919090 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:46 crc kubenswrapper[4125]: I0312 13:37:46.919199 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:47 crc kubenswrapper[4125]: E0312 13:37:47.109274 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.116985 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-apiserver/apiserver-67cbf64bc9-fq4m9"] Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.117248 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" containerID="cri-o://7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a" gracePeriod=90 Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.117554 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver-check-endpoints" containerID="cri-o://331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f" gracePeriod=90 Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.378922 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.534252 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/42b6a393-6194-4620-bf8f-7e4b6cbe5679-installation-pull-secrets\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.534532 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f9ss\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-kube-api-access-4f9ss\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.535039 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-trusted-ca\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.536050 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.536092 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-bound-sa-token\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.536147 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-tls\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.536222 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/42b6a393-6194-4620-bf8f-7e4b6cbe5679-ca-trust-extracted\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.536982 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-certificates\") pod \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\" (UID: \"42b6a393-6194-4620-bf8f-7e4b6cbe5679\") " Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.538335 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.542825 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-trusted-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.556776 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42b6a393-6194-4620-bf8f-7e4b6cbe5679-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.564064 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.607904 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.610298 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.610757 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-kube-api-access-4f9ss" (OuterVolumeSpecName: "kube-api-access-4f9ss") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "kube-api-access-4f9ss". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.611545 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b6a393-6194-4620-bf8f-7e4b6cbe5679-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.656336 4125 reconciler_common.go:300] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-tls\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.656399 4125 reconciler_common.go:300] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/42b6a393-6194-4620-bf8f-7e4b6cbe5679-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.656414 4125 reconciler_common.go:300] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/42b6a393-6194-4620-bf8f-7e4b6cbe5679-registry-certificates\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.656431 4125 reconciler_common.go:300] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/42b6a393-6194-4620-bf8f-7e4b6cbe5679-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.656447 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-4f9ss\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-kube-api-access-4f9ss\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.656458 4125 reconciler_common.go:300] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/42b6a393-6194-4620-bf8f-7e4b6cbe5679-bound-sa-token\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.700596 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.899336 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:47 crc kubenswrapper[4125]: I0312 13:37:47.899604 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.038216 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (OuterVolumeSpecName: "registry-storage") pod "42b6a393-6194-4620-bf8f-7e4b6cbe5679" (UID: "42b6a393-6194-4620-bf8f-7e4b6cbe5679"). InnerVolumeSpecName "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.240122 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.262896 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" event={"ID":"4e18989a-5a3c-4b45-8821-4b91287eaf1e","Type":"ContainerStarted","Data":"cfeb08a81fd6a8dca2eb53f6d7ae1bc9af487d431a775664a7d4cf86980cf131"} Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.265442 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.281972 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.386143 4125 generic.go:334] "Generic (PLEG): container finished" podID="13ad7555-5f28-4555-a563-892713a8433a" containerID="586e67cdf44721522bdaa46be7afee74d9457d02a4b95231987aca3f6a4df542" exitCode=0 Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.386304 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" event={"ID":"13ad7555-5f28-4555-a563-892713a8433a","Type":"ContainerDied","Data":"586e67cdf44721522bdaa46be7afee74d9457d02a4b95231987aca3f6a4df542"} Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.386654 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" podStartSLOduration=16.386587286 podStartE2EDuration="16.386587286s" podCreationTimestamp="2026-03-12 13:37:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:48.374427673 +0000 UTC m=+1038.697813842" watchObservedRunningTime="2026-03-12 13:37:48.386587286 +0000 UTC m=+1038.709973175" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.470789 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-7dc8587b5-4h2pb"] Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.554112 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" event={"ID":"42b6a393-6194-4620-bf8f-7e4b6cbe5679","Type":"ContainerDied","Data":"f1600462c36204cb188fbb0af281410ce327126b920a08415a23e48fa5d77c6b"} Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.554159 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.554238 4125 scope.go:117] "RemoveContainer" containerID="1e2fec87605c0f6f17be5fe9f57263de9e9c959c83ac27c370eb1927d6b59791" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.898458 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.899056 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.990963 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-fq4m9 container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]log ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]etcd ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]etcd-readiness ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]informer-sync ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok Mar 12 13:37:48 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Mar 12 13:37:48 crc kubenswrapper[4125]: [-]shutdown failed: reason withheld Mar 12 13:37:48 crc kubenswrapper[4125]: readyz check failed Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.991312 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:48 crc kubenswrapper[4125]: I0312 13:37:48.991413 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.302579 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:37:49 crc 
kubenswrapper[4125]: I0312 13:37:49.303131 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager" containerID="cri-o://5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" gracePeriod=30 Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.303480 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" containerID="cri-o://24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3" gracePeriod=30 Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.303518 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-cert-syncer" containerID="cri-o://f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" gracePeriod=30 Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.303646 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-recovery-controller" containerID="cri-o://fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" gracePeriod=30 Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305475 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305655 4125 topology_manager.go:215] "Topology Admit Handler" podUID="94e8a39ea660d88d01c6db5ba5e6d884" podNamespace="openshift-kube-controller-manager" podName="kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.305890 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-cert-syncer" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305907 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-cert-syncer" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.305920 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305927 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.305939 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305948 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.305973 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305981 4125 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.305991 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.305998 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306011 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306019 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306029 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-recovery-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306037 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-recovery-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306051 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerName="registry" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306059 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerName="registry" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306072 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306079 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306228 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306243 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306252 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerName="registry" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306261 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-cert-syncer" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306274 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306282 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306291 4125 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager-recovery-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306301 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306311 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="kube-controller-manager" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306421 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306430 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306439 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306446 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: E0312 13:37:49.306458 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306464 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306587 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306605 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.306802 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerName="cluster-policy-controller" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.413063 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.413256 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.518701 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.518816 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.518994 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.519106 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.601255 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-7cbd5666ff-bbfrf"] Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.692121 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-7cbd5666ff-bbfrf"] Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.905327 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:49 crc kubenswrapper[4125]: I0312 13:37:49.905765 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.095042 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" path="/var/lib/kubelet/pods/42b6a393-6194-4620-bf8f-7e4b6cbe5679/volumes" Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.154306 4125 generic.go:334] "Generic (PLEG): container finished" podID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerID="331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f" exitCode=0 Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.154432 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerDied","Data":"331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f"} Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.474500 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: 
connect: connection refused" start-of-body= Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.474585 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.895081 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:50 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:50 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:50 crc kubenswrapper[4125]: I0312 13:37:50.895446 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.061185 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/6.log" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.061613 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.127644 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/kube-controller-manager-cert-syncer/0.log" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133650 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133704 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133733 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133789 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13ad7555-5f28-4555-a563-892713a8433a-audit-dir\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc 
kubenswrapper[4125]: I0312 13:37:51.133819 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133909 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133945 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.133994 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.134018 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.134058 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.141764 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.141817 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.141915 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.141957 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") pod \"13ad7555-5f28-4555-a563-892713a8433a\" (UID: \"13ad7555-5f28-4555-a563-892713a8433a\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.144066 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.144738 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/13ad7555-5f28-4555-a563-892713a8433a-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.171908 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.172671 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.172924 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.175306 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.175762 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.216546 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.217467 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.224013 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.224735 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68" (OuterVolumeSpecName: "kube-api-access-w4r68") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "kube-api-access-w4r68". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.225255 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.225620 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.226244 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.227252 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "13ad7555-5f28-4555-a563-892713a8433a" (UID: "13ad7555-5f28-4555-a563-892713a8433a"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.239508 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-controller-manager/kube-controller-manager-crc" oldPodUID="2eb2b200bca0d10cf0fe16fb7c0caf80" podUID="94e8a39ea660d88d01c6db5ba5e6d884" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243345 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2eb2b200bca0d10cf0fe16fb7c0caf80-resource-dir\") pod \"2eb2b200bca0d10cf0fe16fb7c0caf80\" (UID: \"2eb2b200bca0d10cf0fe16fb7c0caf80\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243488 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2eb2b200bca0d10cf0fe16fb7c0caf80-cert-dir\") pod \"2eb2b200bca0d10cf0fe16fb7c0caf80\" (UID: \"2eb2b200bca0d10cf0fe16fb7c0caf80\") " Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243771 4125 reconciler_common.go:300] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13ad7555-5f28-4555-a563-892713a8433a-audit-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243797 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243817 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243877 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243893 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-w4r68\" (UniqueName: \"kubernetes.io/projected/13ad7555-5f28-4555-a563-892713a8433a-kube-api-access-w4r68\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243909 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243921 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243934 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243946 4125 reconciler_common.go:300] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-audit-policies\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243957 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243970 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243983 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.243996 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.244008 4125 reconciler_common.go:300] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13ad7555-5f28-4555-a563-892713a8433a-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.244055 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2eb2b200bca0d10cf0fe16fb7c0caf80-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "2eb2b200bca0d10cf0fe16fb7c0caf80" (UID: "2eb2b200bca0d10cf0fe16fb7c0caf80"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.244081 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2eb2b200bca0d10cf0fe16fb7c0caf80-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "2eb2b200bca0d10cf0fe16fb7c0caf80" (UID: "2eb2b200bca0d10cf0fe16fb7c0caf80"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.346048 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2eb2b200bca0d10cf0fe16fb7c0caf80-resource-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.346334 4125 reconciler_common.go:300] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2eb2b200bca0d10cf0fe16fb7c0caf80-cert-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.853891 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-7-crc" event={"ID":"ba116478-01f2-47d9-8b88-9db94f1478e3","Type":"ContainerStarted","Data":"3720ac3cffdd9fcb2d6fefed93776a8424b4a6f80e62440cd3007049b0610069"} Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.898275 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.898385 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:51 crc kubenswrapper[4125]: I0312 13:37:51.932774 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-scheduler/installer-7-crc" podStartSLOduration=14.932735015 podStartE2EDuration="14.932735015s" podCreationTimestamp="2026-03-12 13:37:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:37:51.911358733 +0000 UTC m=+1042.234744622" watchObservedRunningTime="2026-03-12 13:37:51.932735015 +0000 UTC m=+1042.256120934" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.051052 4125 patch_prober.go:28] interesting pod/image-registry-7cbd5666ff-bbfrf container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.38:5000/healthz\": dial tcp 10.217.0.38:5000: i/o timeout" start-of-body= Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.051137 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-7cbd5666ff-bbfrf" podUID="42b6a393-6194-4620-bf8f-7e4b6cbe5679" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.38:5000/healthz\": dial tcp 10.217.0.38:5000: i/o timeout" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.180488 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2eb2b200bca0d10cf0fe16fb7c0caf80" path="/var/lib/kubelet/pods/2eb2b200bca0d10cf0fe16fb7c0caf80/volumes" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.183727 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-10-crc"] Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.183790 4125 topology_manager.go:215] "Topology Admit Handler" 
podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: E0312 13:37:52.184258 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.184275 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.184490 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="13ad7555-5f28-4555-a563-892713a8433a" containerName="oauth-openshift" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.187554 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-10-crc"] Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.188621 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.277984 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/410dbf23-e4f3-4307-910c-ad0a079c33e2-kube-api-access\") pod \"revision-pruner-10-crc\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.278350 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/410dbf23-e4f3-4307-910c-ad0a079c33e2-kubelet-dir\") pod \"revision-pruner-10-crc\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.369404 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/cluster-policy-controller/6.log" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.379405 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/410dbf23-e4f3-4307-910c-ad0a079c33e2-kubelet-dir\") pod \"revision-pruner-10-crc\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.379611 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/410dbf23-e4f3-4307-910c-ad0a079c33e2-kube-api-access\") pod \"revision-pruner-10-crc\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.380137 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/410dbf23-e4f3-4307-910c-ad0a079c33e2-kubelet-dir\") pod \"revision-pruner-10-crc\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.464136 4125 kuberuntime_container.go:770] "Killing container with a grace period" 
pod="openshift-image-registry/image-registry-86594ff457-6b77x" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" containerName="registry" containerID="cri-o://573365d61ad5859a16302f5bf7be8220308463d7183d6af3dd120eb68123e736" gracePeriod=30 Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.527928 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_2eb2b200bca0d10cf0fe16fb7c0caf80/kube-controller-manager-cert-syncer/0.log" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.756174 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3" exitCode=0 Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.756289 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" exitCode=0 Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.756332 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" exitCode=2 Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.756350 4125 generic.go:334] "Generic (PLEG): container finished" podID="2eb2b200bca0d10cf0fe16fb7c0caf80" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" exitCode=0 Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.756803 4125 scope.go:117] "RemoveContainer" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.757021 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.918154 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:52 crc kubenswrapper[4125]: I0312 13:37:52.918336 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.022020 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-controller-manager/kube-controller-manager-crc" oldPodUID="2eb2b200bca0d10cf0fe16fb7c0caf80" podUID="94e8a39ea660d88d01c6db5ba5e6d884" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.030918 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/410dbf23-e4f3-4307-910c-ad0a079c33e2-kube-api-access\") pod \"revision-pruner-10-crc\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.064603 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.065722 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-765b47f944-n2lhl" event={"ID":"13ad7555-5f28-4555-a563-892713a8433a","Type":"ContainerDied","Data":"ae07e756a7d12ed057ca356f074f3bef9b7c944c57f3e10700d1db5545a7d68d"} Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.099472 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.099786 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.099986 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.100108 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.134961 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.352030 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-765b47f944-n2lhl"] Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.412596 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-765b47f944-n2lhl"] Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.521498 4125 generic.go:334] "Generic (PLEG): container finished" podID="972b3cfd-f9d0-485e-924b-b5258282d155" containerID="668ae8f960878d1d1497a68b5fc115baff849ced6ae4a2760ec901bdbacfc479" exitCode=0 Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.521720 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-9-crc" event={"ID":"972b3cfd-f9d0-485e-924b-b5258282d155","Type":"ContainerDied","Data":"668ae8f960878d1d1497a68b5fc115baff849ced6ae4a2760ec901bdbacfc479"} Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.599363 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.600072 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.713469 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.713495 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.713674 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.713750 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.713874 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.847260 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": 
dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.847335 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.888688 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:53 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:53 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:53 crc kubenswrapper[4125]: I0312 13:37:53.888757 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.037812 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-fq4m9 container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]etcd-readiness ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]informer-sync ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Mar 12 13:37:54 crc kubenswrapper[4125]: [-]shutdown failed: reason withheld
Mar 12 13:37:54 crc kubenswrapper[4125]: readyz check failed
Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.037957 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
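The probe failures above show the aggregated health-check format used by Kubernetes healthz/readyz endpoints: one line per named check, [+] for passing, [-] for failing with the reason withheld, and a trailing verdict line; any failing check turns the whole endpoint into HTTP 500, which the kubelet prober then records as "HTTP probe failed with statuscode: 500". A minimal sketch of that aggregation pattern (stdlib only; the check names and the shuttingDown flag are illustrative, not the apiserver's implementation):

// readyz.go — a minimal sketch of the healthz/readyz aggregation pattern whose
// output format appears in the probe failure above. It mirrors the response
// body only; it is not the kube-apiserver implementation.
package main

import (
	"fmt"
	"net/http"
)

type check struct {
	name string
	run  func() error
}

func readyz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for _, c := range checks {
			if err := c.run(); err != nil {
				failed = true
				// The real endpoint withholds reasons unless verbose output is requested.
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			// One failing check fails the whole endpoint with HTTP 500.
			http.Error(w, body+"readyz check failed", http.StatusInternalServerError)
			return
		}
		fmt.Fprint(w, body+"readyz check passed\n")
	}
}

func main() {
	shuttingDown := true // e.g. set during graceful termination, as in the log above
	http.Handle("/readyz", readyz([]check{
		{"ping", func() error { return nil }},
		{"etcd", func() error { return nil }},
		{"shutdown", func() error {
			if shuttingDown {
				return fmt.Errorf("server is shutting down")
			}
			return nil
		}},
	}))
	_ = http.ListenAndServe(":8080", nil) // probe with: GET http://localhost:8080/readyz
}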
\"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.161024 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.164035 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13ad7555-5f28-4555-a563-892713a8433a" path="/var/lib/kubelet/pods/13ad7555-5f28-4555-a563-892713a8433a/volumes" Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.167536 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"903cc09b29eb6b57a4de9a646e0d2d20b91d23eac8fd8cd6470da91e14b35e89"} Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.895918 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:54 crc kubenswrapper[4125]: I0312 13:37:54.896305 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:55 crc kubenswrapper[4125]: I0312 13:37:55.785613 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/0.log" Mar 12 13:37:55 crc kubenswrapper[4125]: I0312 13:37:55.786098 4125 generic.go:334] "Generic (PLEG): container finished" podID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerID="94803870c39c663aac3c4df56ed06883072a310c09c31b80ac8c3f4c99915832" exitCode=1 Mar 12 13:37:55 crc kubenswrapper[4125]: I0312 13:37:55.786467 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerDied","Data":"94803870c39c663aac3c4df56ed06883072a310c09c31b80ac8c3f4c99915832"} Mar 12 13:37:55 crc kubenswrapper[4125]: I0312 13:37:55.787961 4125 scope.go:117] "RemoveContainer" containerID="94803870c39c663aac3c4df56ed06883072a310c09c31b80ac8c3f4c99915832" Mar 12 13:37:55 crc kubenswrapper[4125]: I0312 13:37:55.890212 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:37:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:37:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:37:55 crc kubenswrapper[4125]: healthz check failed Mar 12 13:37:55 crc kubenswrapper[4125]: I0312 13:37:55.890440 4125 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.075736 4125 generic.go:334] "Generic (PLEG): container finished" podID="8d14510a-ac3d-4029-ae28-538bb2e94e32" containerID="573365d61ad5859a16302f5bf7be8220308463d7183d6af3dd120eb68123e736" exitCode=0 Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.078709 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.078757 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.563732 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"] Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.564097 4125 topology_manager.go:215] "Topology Admit Handler" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" podNamespace="openshift-authentication" podName="oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.564949 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-86594ff457-6b77x" event={"ID":"8d14510a-ac3d-4029-ae28-538bb2e94e32","Type":"ContainerDied","Data":"573365d61ad5859a16302f5bf7be8220308463d7183d6af3dd120eb68123e736"} Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.565117 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.573576 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.573707 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.577177 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.577366 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.585028 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.585290 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.585520 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.585673 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.585108 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.586194 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.587464 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-6sd5l" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.600630 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.608909 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687078 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687234 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687275 4125 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687305 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687359 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687402 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687450 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-dir\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687491 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687545 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687611 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" 
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687656 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687758 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687803 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.687956 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.759574 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798316 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798414 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798457 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798508 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for 
volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798542 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798571 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798611 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798645 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798675 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798704 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798754 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798791 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798957 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-dir\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.798999 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.803986 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.833356 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.842977 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.846376 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.846538 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-dir\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.860548 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.895184 4125 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.909731 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.922934 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:56 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:56 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.923098 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.967939 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.995667 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.996236 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.996517 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:56 crc kubenswrapper[4125]: I0312 13:37:56.996723 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.005717 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.212625 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"]
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.391763 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.396349 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/installer-9-crc" podUID="35c093da-a468-44a1-8ff0-09b09268828c" containerName="installer" containerID="cri-o://a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d" gracePeriod=30
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.465454 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.496523 4125 patch_prober.go:28] interesting pod/image-registry-86594ff457-6b77x container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.44:5000/healthz\": dial tcp 10.217.0.44:5000: connect: connection refused" start-of-body=
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.507359 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-86594ff457-6b77x" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.44:5000/healthz\": dial tcp 10.217.0.44:5000: connect: connection refused"
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.541641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.908522 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:57 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:57 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:57 crc kubenswrapper[4125]: I0312 13:37:57.908889 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.272090 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.309484 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.580772 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-10-crc"]
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.583075 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.583313 4125 topology_manager.go:215] "Topology Admit Handler" podUID="bf055e84f32193b9c1c21b0c34a61f01" podNamespace="openshift-kube-apiserver" podName="kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.584510 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.584716 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.585221 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.585558 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver" containerID="cri-o://8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" gracePeriod=15
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.585675 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints" containerID="cri-o://1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf" gracePeriod=15
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.585713 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" gracePeriod=15
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.585776 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45" gracePeriod=15
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.585929 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-syncer" containerID="cri-o://5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" gracePeriod=15
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586244 4125 topology_manager.go:215] "Topology Admit Handler" podUID="48128e8d38b5cbcd2691da698bd9cac3" podNamespace="openshift-kube-apiserver" podName="kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586450 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586463 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586473 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586481 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586490 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586498 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586508 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586515 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586524 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586532 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586541 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586551 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586564 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-syncer"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586571 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-syncer"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586580 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586588 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586598 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-insecure-readyz"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586605 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-insecure-readyz"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586619 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-regeneration-controller"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586627 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-regeneration-controller"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.586636 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="setup"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586643 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="setup"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586753 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586764 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586774 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-insecure-readyz"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586785 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586796 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586804 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-regeneration-controller"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586863 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-cert-syncer"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586880 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.586891 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.587013 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.587022 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: E0312 13:37:58.587034 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.587041 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.587138 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.587176 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.587197 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="53c1db1508241fbac1bedf9130341ffe" containerName="kube-apiserver-check-endpoints"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.601786 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.601891 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.601950 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.602100 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.602137 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.625275 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="53c1db1508241fbac1bedf9130341ffe" podUID="48128e8d38b5cbcd2691da698bd9cac3"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.704280 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.706018 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.707229 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.707577 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.708478 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.708617 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.708766 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.709030 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.709223 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.709396 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.709488 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.709543 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.709572 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.769233 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.793399 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.811903 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.812004 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.812052 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.812236 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.812328 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.812371 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.903128 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:58 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.903248 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.985213 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-fq4m9 container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]etcd-readiness ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]informer-sync ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Mar 12 13:37:58 crc kubenswrapper[4125]: [-]shutdown failed: reason withheld
Mar 12 13:37:58 crc kubenswrapper[4125]: readyz check failed
Mar 12 13:37:58 crc kubenswrapper[4125]: I0312 13:37:58.985308 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.728549 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-9-crc"
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.862641 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-kubelet-dir\") pod \"972b3cfd-f9d0-485e-924b-b5258282d155\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") "
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.862787 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/972b3cfd-f9d0-485e-924b-b5258282d155-kube-api-access\") pod \"972b3cfd-f9d0-485e-924b-b5258282d155\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") "
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.862893 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-var-lock\") pod \"972b3cfd-f9d0-485e-924b-b5258282d155\" (UID: \"972b3cfd-f9d0-485e-924b-b5258282d155\") "
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.863393 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-var-lock" (OuterVolumeSpecName: "var-lock") pod "972b3cfd-f9d0-485e-924b-b5258282d155" (UID: "972b3cfd-f9d0-485e-924b-b5258282d155"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.863415 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "972b3cfd-f9d0-485e-924b-b5258282d155" (UID: "972b3cfd-f9d0-485e-924b-b5258282d155"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.903548 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:37:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:37:59 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:37:59 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.905049 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.921948 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/972b3cfd-f9d0-485e-924b-b5258282d155-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "972b3cfd-f9d0-485e-924b-b5258282d155" (UID: "972b3cfd-f9d0-485e-924b-b5258282d155"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.966589 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-kubelet-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.966648 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/972b3cfd-f9d0-485e-924b-b5258282d155-kube-api-access\") on node \"crc\" DevicePath \"\""
Mar 12 13:37:59 crc kubenswrapper[4125]: I0312 13:37:59.966663 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/972b3cfd-f9d0-485e-924b-b5258282d155-var-lock\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.030146 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.082727 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="df02f99a-b4f8-4711-aedf-964dcb4d3400"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.083002 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="df02f99a-b4f8-4711-aedf-964dcb4d3400"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.150094 4125 kubelet.go:1922] "Deleted mirror pod because it is outdated" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.161939 4125 generic.go:334] "Generic (PLEG): container finished" podID="cc291782-27d2-4a74-af79-c7dcb31535d2" containerID="33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86" exitCode=0
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.188214 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.437471 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"]
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.437527 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"]
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.437547 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"]
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.437578 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerDied","Data":"33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86"}
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.438016 4125 scope.go:117] "RemoveContainer" containerID="33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.443480 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.444209 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:00 crc kubenswrapper[4125]: E0312 13:38:00.651123 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.772406 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-10-crc" event={"ID":"410dbf23-e4f3-4307-910c-ad0a079c33e2","Type":"ContainerStarted","Data":"dd1c832419b5b04fba347b193c61e0cfe421961c8ceb49df83b3c5b58fdc5c38"}
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.891389 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:00 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:00 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:00 crc kubenswrapper[4125]: I0312 13:38:00.891748 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.546002 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-86594ff457-6b77x"
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.547542 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.557586 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.558526 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.737944 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738312 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8d14510a-ac3d-4029-ae28-538bb2e94e32-ca-trust-extracted\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738357 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-trusted-ca\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738386 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-tls\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738409 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-bound-sa-token\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738431 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2zvk\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-kube-api-access-r2zvk\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738457 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-certificates\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.738481 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8d14510a-ac3d-4029-ae28-538bb2e94e32-installation-pull-secrets\") pod \"8d14510a-ac3d-4029-ae28-538bb2e94e32\" (UID: \"8d14510a-ac3d-4029-ae28-538bb2e94e32\") "
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.759117 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d14510a-ac3d-4029-ae28-538bb2e94e32-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.760363 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.761452 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.830754 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d14510a-ac3d-4029-ae28-538bb2e94e32-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.831099 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-kube-api-access-r2zvk" (OuterVolumeSpecName: "kube-api-access-r2zvk") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "kube-api-access-r2zvk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.831275 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.841057 4125 reconciler_common.go:300] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8d14510a-ac3d-4029-ae28-538bb2e94e32-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.841269 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-trusted-ca\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.841290 4125 reconciler_common.go:300] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-tls\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.841325 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-r2zvk\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-kube-api-access-r2zvk\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.841337 4125 reconciler_common.go:300] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8d14510a-ac3d-4029-ae28-538bb2e94e32-registry-certificates\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.841351 4125 reconciler_common.go:300] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8d14510a-ac3d-4029-ae28-538bb2e94e32-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.844337 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (OuterVolumeSpecName: "registry-storage") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97". PluginName "kubernetes.io/csi", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.847806 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8d14510a-ac3d-4029-ae28-538bb2e94e32" (UID: "8d14510a-ac3d-4029-ae28-538bb2e94e32"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.891979 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:01 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:01 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.892066 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:01 crc kubenswrapper[4125]: I0312 13:38:01.951022 4125 reconciler_common.go:300] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8d14510a-ac3d-4029-ae28-538bb2e94e32-bound-sa-token\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.030932 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.037892 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.039762 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.664257 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.686449 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.688347 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.701521 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35c093da-a468-44a1-8ff0-09b09268828c-kube-api-access\") pod \"35c093da-a468-44a1-8ff0-09b09268828c\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") "
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.701581 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-var-lock\") pod \"35c093da-a468-44a1-8ff0-09b09268828c\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") "
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.701628 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-kubelet-dir\") pod \"35c093da-a468-44a1-8ff0-09b09268828c\" (UID: \"35c093da-a468-44a1-8ff0-09b09268828c\") "
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.702279 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "35c093da-a468-44a1-8ff0-09b09268828c" (UID: "35c093da-a468-44a1-8ff0-09b09268828c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.705320 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-var-lock" (OuterVolumeSpecName: "var-lock") pod "35c093da-a468-44a1-8ff0-09b09268828c" (UID: "35c093da-a468-44a1-8ff0-09b09268828c"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.715994 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.743381 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.744623 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c093da-a468-44a1-8ff0-09b09268828c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "35c093da-a468-44a1-8ff0-09b09268828c" (UID: "35c093da-a468-44a1-8ff0-09b09268828c"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.799521 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/6.log"
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.807726 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35c093da-a468-44a1-8ff0-09b09268828c-kube-api-access\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.807885 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-var-lock\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.807901 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/35c093da-a468-44a1-8ff0-09b09268828c-kubelet-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.891379 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:02 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:02 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:02 crc kubenswrapper[4125]: I0312 13:38:02.891550 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.006917 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-cert-syncer/0.log"
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.085764 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.085883 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.145421 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf" exitCode=0
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.145602 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" exitCode=0
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.145624 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45" exitCode=0
Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.145671 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" exitCode=2
Mar 12 13:38:03 crc kubenswrapper[4125]: E0312 13:38:03.457223 4125 remote_runtime.go:193] "RunPodSandbox from runtime service failed" err=<
Mar 12 13:38:03 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db" Netns:"/var/run/netns/d2bc30bf-34a2-4fad-9e7d-77f16f9a467e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:38:03 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Mar 12 13:38:03 crc kubenswrapper[4125]: >
Mar 12 13:38:03 crc kubenswrapper[4125]: E0312 13:38:03.457315 4125 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=<
Mar 12 13:38:03 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db" Netns:"/var/run/netns/d2bc30bf-34a2-4fad-9e7d-77f16f9a467e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:38:03 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Mar 12 13:38:03 crc kubenswrapper[4125]: > pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:38:03 crc kubenswrapper[4125]: E0312 13:38:03.457343 4125 kuberuntime_manager.go:1172] "CreatePodSandbox for pod failed" err=<
Mar 12 13:38:03 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db" Netns:"/var/run/netns/d2bc30bf-34a2-4fad-9e7d-77f16f9a467e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:38:03 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Mar 12 13:38:03 crc kubenswrapper[4125]: > pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:38:03 crc kubenswrapper[4125]: E0312 13:38:03.457504 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-6499cf79cf-qdfbh_openshift-authentication(b61ce6b0-a70f-42b7-9435-3d6acba81ccf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-6499cf79cf-qdfbh_openshift-authentication(b61ce6b0-a70f-42b7-9435-3d6acba81ccf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db\\\" Netns:\\\"/var/run/netns/d2bc30bf-34a2-4fad-9e7d-77f16f9a467e\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=5fc2c4eb9e063b34674b45a6844bb614b596cad8ca6b67e54b0bb4941fa685db;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s\\\": dial tcp 192.168.130.11:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf"
Mar 12 13:38:03 crc
kubenswrapper[4125]: I0312 13:38:03.597925 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.598271 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.621357 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-9-crc" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.621571 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-9-crc" event={"ID":"972b3cfd-f9d0-485e-924b-b5258282d155","Type":"ContainerDied","Data":"9c4637669f76d77a2a1dda152cd35451cc82348e79d6d18f9076cbcd53ef56df"} Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.622519 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c4637669f76d77a2a1dda152cd35451cc82348e79d6d18f9076cbcd53ef56df" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.623982 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.625049 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.627076 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.628346 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.631458 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.639441 4125 status_manager.go:853] 
"Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.642514 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.646129 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.653519 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.655479 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.714744 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.714882 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.715893 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.715964 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: W0312 13:38:03.788643 4125 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94e8a39ea660d88d01c6db5ba5e6d884.slice/crio-53b6100d1f832b76dbb1cefa2d3f01e49f4039a6ae1d3bd27a3c4a5f86699f8a WatchSource:0}: Error finding container 53b6100d1f832b76dbb1cefa2d3f01e49f4039a6ae1d3bd27a3c4a5f86699f8a: Status 404 returned error can't find the container with id 53b6100d1f832b76dbb1cefa2d3f01e49f4039a6ae1d3bd27a3c4a5f86699f8a Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.852210 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.852302 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.889565 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:03 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.889768 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.942516 4125 patch_prober.go:28] interesting pod/apiserver-67cbf64bc9-fq4m9 container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.56:8443/readyz\": dial tcp 10.217.0.56:8443: connect: connection refused" start-of-body= Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.943572 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.56:8443/readyz\": dial tcp 10.217.0.56:8443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.947150 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-86594ff457-6b77x" event={"ID":"8d14510a-ac3d-4029-ae28-538bb2e94e32","Type":"ContainerDied","Data":"dec419af66e5c6d1f29c5005c971d4a714601c652d36d047e42e480153aaa9fa"} Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.947291 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-86594ff457-6b77x" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.951364 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.953198 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.954538 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.955539 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.961324 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.976067 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.977875 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.979109 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.981277 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" 
pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:03 crc kubenswrapper[4125]: I0312 13:38:03.982303 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.051285 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/6.log" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.111024 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.267365 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-cert-syncer/0.log" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.271175 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/0.log" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.272474 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.274966 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.277130 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.278243 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.279447 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.281721 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.284032 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.434388 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.436472 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.438303 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.439093 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.441095 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.443124 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.444052 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.445055 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" 
pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.568271 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-resource-dir\") pod \"53c1db1508241fbac1bedf9130341ffe\" (UID: \"53c1db1508241fbac1bedf9130341ffe\") " Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.568343 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-audit-dir\") pod \"53c1db1508241fbac1bedf9130341ffe\" (UID: \"53c1db1508241fbac1bedf9130341ffe\") " Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.568435 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-cert-dir\") pod \"53c1db1508241fbac1bedf9130341ffe\" (UID: \"53c1db1508241fbac1bedf9130341ffe\") " Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.568676 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "53c1db1508241fbac1bedf9130341ffe" (UID: "53c1db1508241fbac1bedf9130341ffe"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.568713 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "53c1db1508241fbac1bedf9130341ffe" (UID: "53c1db1508241fbac1bedf9130341ffe"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.568732 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "53c1db1508241fbac1bedf9130341ffe" (UID: "53c1db1508241fbac1bedf9130341ffe"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.592976 4125 scope.go:117] "RemoveContainer" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.615783 4125 generic.go:334] "Generic (PLEG): container finished" podID="35c093da-a468-44a1-8ff0-09b09268828c" containerID="a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d" exitCode=0 Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.615905 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"35c093da-a468-44a1-8ff0-09b09268828c","Type":"ContainerDied","Data":"a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d"} Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.615973 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.618301 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.620465 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.621630 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.622932 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.625069 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.627057 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.627913 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.629647 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.631069 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.631906 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.633426 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.634340 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.637342 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.646749 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.670142 4125 reconciler_common.go:300] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-cert-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.670217 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-resource-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.670257 4125 reconciler_common.go:300] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/53c1db1508241fbac1bedf9130341ffe-audit-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.878561 4125 scope.go:117] "RemoveContainer" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.885973 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:04 crc 
kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:04 crc kubenswrapper[4125]: I0312 13:38:04.886062 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.005958 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.008290 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.009738 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.010523 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.011534 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.013424 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.014790 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.016519 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" 
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.017362 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:05 crc kubenswrapper[4125]: E0312 13:38:05.062451 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077391 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-serving-ca\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077453 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxzkm\" (UniqueName: \"kubernetes.io/projected/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-kube-api-access-wxzkm\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077514 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077537 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-image-import-ca\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077611 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-client\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077633 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-serving-cert\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077667 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-node-pullsecrets\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077713 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-encryption-config\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077743 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-config\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077778 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit-dir\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.077801 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-trusted-ca-bundle\") pod \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\" (UID: \"3178c6ca-b9b2-446c-990f-8bf4a9f01b96\") "
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.080489 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.080615 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.082702 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.080734 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.083094 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.083500 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-config" (OuterVolumeSpecName: "config") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.084102 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit" (OuterVolumeSpecName: "audit") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.101801 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.123231 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.123748 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.143459 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-kube-api-access-wxzkm" (OuterVolumeSpecName: "kube-api-access-wxzkm") pod "3178c6ca-b9b2-446c-990f-8bf4a9f01b96" (UID: "3178c6ca-b9b2-446c-990f-8bf4a9f01b96"). InnerVolumeSpecName "kube-api-access-wxzkm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180383 4125 reconciler_common.go:300] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180437 4125 reconciler_common.go:300] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-image-import-ca\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180451 4125 reconciler_common.go:300] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-client\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180463 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-serving-cert\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180475 4125 reconciler_common.go:300] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-node-pullsecrets\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180489 4125 reconciler_common.go:300] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-encryption-config\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180500 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-config\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180511 4125 reconciler_common.go:300] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-audit-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180522 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180532 4125 reconciler_common.go:300] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.180545 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wxzkm\" (UniqueName: \"kubernetes.io/projected/3178c6ca-b9b2-446c-990f-8bf4a9f01b96-kube-api-access-wxzkm\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.273485 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.273582 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.611365 4125 scope.go:117] "RemoveContainer" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad"
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.886090 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:05 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:05 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:05 crc kubenswrapper[4125]: I0312 13:38:05.886777 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.171433 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.172454 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.181399 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.186963 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.188078 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.188105 4125 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.188634 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="200ms"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.234075 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53c1db1508241fbac1bedf9130341ffe" path="/var/lib/kubelet/pods/53c1db1508241fbac1bedf9130341ffe/volumes"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.236334 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerStarted","Data":"53b6100d1f832b76dbb1cefa2d3f01e49f4039a6ae1d3bd27a3c4a5f86699f8a"}
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.390152 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="400ms"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.436769 4125 scope.go:117] "RemoveContainer" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.556802 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": container with ID starting with 24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3 not found: ID does not exist" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.556913 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"} err="failed to get container status \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": rpc error: code = NotFound desc = could not find container \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": container with ID starting with 24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3 not found: ID does not exist"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.556932 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.606066 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"35c093da-a468-44a1-8ff0-09b09268828c","Type":"ContainerDied","Data":"9293ffca43f0a2173595f04abd3a03ea17c911b2efccdb680f070140769df4fb"}
Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.645697 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": container with ID starting with 93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3 not found: ID does not exist" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"
Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.645761 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} err="failed to get container status \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": rpc error: code = NotFound desc = could not find container \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": container with ID starting with 93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3 not found: ID does not exist"
Mar 12
13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.645779 4125 scope.go:117] "RemoveContainer" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.727889 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": container with ID starting with fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921 not found: ID does not exist" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.727944 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921"} err="failed to get container status \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": rpc error: code = NotFound desc = could not find container \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": container with ID starting with fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921 not found: ID does not exist" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.727959 4125 scope.go:117] "RemoveContainer" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.791802 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="800ms" Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.819920 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": container with ID starting with f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d not found: ID does not exist" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.820001 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d"} err="failed to get container status \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": rpc error: code = NotFound desc = could not find container \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": container with ID starting with f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d not found: ID does not exist" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.820026 4125 scope.go:117] "RemoveContainer" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.887561 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:06 crc 
kubenswrapper[4125]: I0312 13:38:06.887656 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:06 crc kubenswrapper[4125]: E0312 13:38:06.915584 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": container with ID starting with 5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad not found: ID does not exist" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.915648 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad"} err="failed to get container status \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": rpc error: code = NotFound desc = could not find container \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": container with ID starting with 5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad not found: ID does not exist" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.915666 4125 scope.go:117] "RemoveContainer" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3" Mar 12 13:38:06 crc kubenswrapper[4125]: I0312 13:38:06.915898 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"bf055e84f32193b9c1c21b0c34a61f01","Type":"ContainerStarted","Data":"93febfbc53dc33a567388b5e38533f7db9c6e053d83c7e1dbdb1e48845aeea52"} Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.044034 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"} err="failed to get container status \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": rpc error: code = NotFound desc = could not find container \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": container with ID starting with 24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3 not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.044072 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.099145 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} err="failed to get container status \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": rpc error: code = NotFound desc = could not find container \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": container with ID starting with 93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3 not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.099227 4125 scope.go:117] "RemoveContainer" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.251741 4125 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921"} err="failed to get container status \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": rpc error: code = NotFound desc = could not find container \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": container with ID starting with fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921 not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.251937 4125 scope.go:117] "RemoveContainer" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.352707 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d"} err="failed to get container status \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": rpc error: code = NotFound desc = could not find container \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": container with ID starting with f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.352849 4125 scope.go:117] "RemoveContainer" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" Mar 12 13:38:07 crc kubenswrapper[4125]: E0312 13:38:07.594935 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="1.6s" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.616477 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad"} err="failed to get container status \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": rpc error: code = NotFound desc = could not find container \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": container with ID starting with 5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.616524 4125 scope.go:117] "RemoveContainer" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.721115 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"} err="failed to get container status \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": rpc error: code = NotFound desc = could not find container \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": container with ID starting with 24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3 not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.721272 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.722393 4125 generic.go:334] "Generic (PLEG): container finished" podID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerID="7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a" exitCode=0 Mar 12 
13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.722606 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.722659 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerDied","Data":"7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a"} Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.722710 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" event={"ID":"3178c6ca-b9b2-446c-990f-8bf4a9f01b96","Type":"ContainerDied","Data":"c75f0aaf1ed5d02ba790f7f91b5f1610e4d4bde81ce94ac51410108751348e85"} Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.727526 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.729196 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.731625 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.733289 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.742043 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.744152 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.746975 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.749411 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.755894 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.757706 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.759464 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.761258 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.762922 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.763901 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.774398 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} err="failed to get container status \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": rpc error: code = NotFound desc = could not find container \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": container with ID starting with 
93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3 not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.774548 4125 scope.go:117] "RemoveContainer" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.877764 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921"} err="failed to get container status \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": rpc error: code = NotFound desc = could not find container \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": container with ID starting with fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921 not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.877879 4125 scope.go:117] "RemoveContainer" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.890494 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.890564 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.972016 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d"} err="failed to get container status \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": rpc error: code = NotFound desc = could not find container \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": container with ID starting with f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d not found: ID does not exist" Mar 12 13:38:07 crc kubenswrapper[4125]: I0312 13:38:07.972105 4125 scope.go:117] "RemoveContainer" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.059242 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad"} err="failed to get container status \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": rpc error: code = NotFound desc = could not find container \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": container with ID starting with 5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad not found: ID does not exist" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.059379 4125 scope.go:117] "RemoveContainer" containerID="24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.159464 4125 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3"} err="failed to get container status \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": rpc error: code = NotFound desc = could not find container \"24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3\": container with ID starting with 24da57bb9bd5b269c80a46c6ba3cce475dedf9dc1c7e9a88b48684ded40177c3 not found: ID does not exist" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.159586 4125 scope.go:117] "RemoveContainer" containerID="93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.243253 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3"} err="failed to get container status \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": rpc error: code = NotFound desc = could not find container \"93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3\": container with ID starting with 93387cbf56de5a58fb0a29d9c48b855a190335220b90b2b041237d26c1dda7f3 not found: ID does not exist" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.243365 4125 scope.go:117] "RemoveContainer" containerID="fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.304300 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921"} err="failed to get container status \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": rpc error: code = NotFound desc = could not find container \"fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921\": container with ID starting with fbfce8fb8242e6264cd917cf02777f11943c16061e82c9f47c31b6e6987da921 not found: ID does not exist" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.304450 4125 scope.go:117] "RemoveContainer" containerID="f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.397457 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-check-endpoints/6.log" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.397940 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d"} err="failed to get container status \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": rpc error: code = NotFound desc = could not find container \"f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d\": container with ID starting with f84d10833785e7c0bda2cf38ce969f00eb4dfe63a0e6c7ce4bcc6ea9758c467d not found: ID does not exist" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.400733 4125 scope.go:117] "RemoveContainer" containerID="5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.473278 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad"} err="failed to get container status \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": rpc error: code 
= NotFound desc = could not find container \"5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad\": container with ID starting with 5f3e22e876bfd789f3b7d427ea1461fd486b02dcb988a7298bc4aa24670e01ad not found: ID does not exist" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.473330 4125 scope.go:117] "RemoveContainer" containerID="586e67cdf44721522bdaa46be7afee74d9457d02a4b95231987aca3f6a4df542" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.544102 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-cert-syncer/0.log" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.760931 4125 generic.go:334] "Generic (PLEG): container finished" podID="53c1db1508241fbac1bedf9130341ffe" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" exitCode=0 Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.761275 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.772239 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.773506 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.774498 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.775387 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.778995 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.780590 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection 
refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.783019 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.784560 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.787365 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.791136 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.793187 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.794325 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.795356 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.799685 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.802727 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.803913 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.885734 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:08 crc kubenswrapper[4125]: I0312 13:38:08.886121 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.025009 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.032487 4125 status_manager.go:853] "Failed to get status for pod" podUID="53c1db1508241fbac1bedf9130341ffe" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.033507 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.034233 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.034865 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.036546 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.044302 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.048202 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.048739 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.070129 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.070202 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.071711 4125 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.072299 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.207801 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="3.2s" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.208461 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/0.log" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.208571 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.209117 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.209692 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"c6d10a00de1ac306e3b2905f7c86dca8567df9d41f149845495c032671f7caf6"} Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.406501 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.408314 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.409023 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.411465 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.412499 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.412558 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:38:09 crc kubenswrapper[4125]: E0312 13:38:09.471594 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.16\\\"\"" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.897945 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.898038 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:09 crc kubenswrapper[4125]: I0312 13:38:09.948344 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:38:10 crc kubenswrapper[4125]: 
I0312 13:38:10.210918 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.211069 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.850341 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"bf055e84f32193b9c1c21b0c34a61f01","Type":"ContainerStarted","Data":"b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288"} Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.863567 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.865213 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.868658 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.870399 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.889466 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.892426 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: 
connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.893002 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.896541 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.906240 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:38:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:38:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:38:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:38:10 crc kubenswrapper[4125]: I0312 13:38:10.906323 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.176092 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.176239 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.176313 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.176258 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.600021 4125 scope.go:117] "RemoveContainer" containerID="573365d61ad5859a16302f5bf7be8220308463d7183d6af3dd120eb68123e736" Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.745263 4125 kubelet.go:2461] "SyncLoop (PLEG): 
event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerStarted","Data":"9e714039139b1c5c2bfce080a2f7e5a156823333dd11f32400eeaed832816a11"}
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.756426 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.766940 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.767650 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.768325 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.774743 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.775534 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.779136 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.780236 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.893390 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:11 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:11 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:11 crc kubenswrapper[4125]: I0312 13:38:11.893541 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.042478 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.046332 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.051091 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.052583 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.057042 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.059958 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.064427 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.071255 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.075960 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.127304 4125 generic.go:334] "Generic (PLEG): container finished" podID="410dbf23-e4f3-4307-910c-ad0a079c33e2" containerID="d4bf1ee3eb89745dc69475637ba48fdfdb95284b46c45d37f782bba4268f5fd3" exitCode=0
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.128699 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.131458 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.133958 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.134653 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.135688 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.137368 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.138779 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.140093 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.141466 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.143320 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.190805 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-10-crc" event={"ID":"410dbf23-e4f3-4307-910c-ad0a079c33e2","Type":"ContainerDied","Data":"d4bf1ee3eb89745dc69475637ba48fdfdb95284b46c45d37f782bba4268f5fd3"}
Mar 12 13:38:12 crc kubenswrapper[4125]: E0312 13:38:12.410413 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="6.4s"
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.888770 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:12 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:12 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:12 crc kubenswrapper[4125]: I0312 13:38:12.888965 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:13 crc kubenswrapper[4125]: E0312 13:38:13.151518 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.16\\\"\"" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.343622 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.343946 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.599072 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body=
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.599223 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.713647 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.713778 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.713896 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.713970 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.714029 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.714752 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.714963 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.716451 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"903cc09b29eb6b57a4de9a646e0d2d20b91d23eac8fd8cd6470da91e14b35e89"} pod="openshift-console/downloads-65476884b9-9wcvx" containerMessage="Container download-server failed liveness probe, will be restarted"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.716617 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" containerID="cri-o://903cc09b29eb6b57a4de9a646e0d2d20b91d23eac8fd8cd6470da91e14b35e89" gracePeriod=2
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.743805 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/1.log"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.775653 4125 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12" exitCode=0
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.776060 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12"}
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.777056 4125 scope.go:117] "RemoveContainer" containerID="228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12"
Mar 12 13:38:13 crc kubenswrapper[4125]: E0312 13:38:13.777604 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.778288 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.789640 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.791738 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.792629 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.794507 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.800406 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.807055 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.808431 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.810034 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.818310 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.823492 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.847354 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.847885 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.890890 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:13 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:13 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:13 crc kubenswrapper[4125]: I0312 13:38:13.891036 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.019072 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerStarted","Data":"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"}
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.087652 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.087785 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.345121 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.345599 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.356358 4125 scope.go:117] "RemoveContainer" containerID="a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d"
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.415081 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerName="registry" containerID="cri-o://9c2dc36f0c3ee44e1f4c7d09861313a36668b4d06de91c2b5855b11f060bbb11" gracePeriod=30
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.416091 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.416201 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.539900 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerStarted","Data":"c76a19a8753fa1b9058eaf08ea849a9688345330833cccd06ceea3b47da93e75"}
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.893252 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:14 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:14 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:14 crc kubenswrapper[4125]: I0312 13:38:14.893352 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:15 crc kubenswrapper[4125]: I0312 13:38:15.003955 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-cert-syncer/0.log"
Mar 12 13:38:15 crc kubenswrapper[4125]: E0312 13:38:15.065127 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:38:15 crc kubenswrapper[4125]: E0312 13:38:15.208384 4125 remote_runtime.go:193] "RunPodSandbox from runtime service failed" err=<
Mar 12 13:38:15 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d" Netns:"/var/run/netns/5f647c9e-f5a6-4f4d-8963-529dda86e8cf" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:38:15 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Mar 12 13:38:15 crc kubenswrapper[4125]: >
Mar 12 13:38:15 crc kubenswrapper[4125]: E0312 13:38:15.208434 4125 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=<
Mar 12 13:38:15 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d" Netns:"/var/run/netns/5f647c9e-f5a6-4f4d-8963-529dda86e8cf" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:38:15 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Mar 12 13:38:15 crc kubenswrapper[4125]: > pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:38:15 crc kubenswrapper[4125]: E0312 13:38:15.208453 4125 kuberuntime_manager.go:1172] "CreatePodSandbox for pod failed" err=<
Mar 12 13:38:15 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d" Netns:"/var/run/netns/5f647c9e-f5a6-4f4d-8963-529dda86e8cf" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:38:15 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Mar 12 13:38:15 crc kubenswrapper[4125]: > pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 12 13:38:15 crc kubenswrapper[4125]: E0312 13:38:15.208537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-6499cf79cf-qdfbh_openshift-authentication(b61ce6b0-a70f-42b7-9435-3d6acba81ccf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-6499cf79cf-qdfbh_openshift-authentication(b61ce6b0-a70f-42b7-9435-3d6acba81ccf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d\\\" Netns:\\\"/var/run/netns/5f647c9e-f5a6-4f4d-8963-529dda86e8cf\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=e551407f4d69e0fef2f544222c7c4897cb357aa9335034e52d2eda4703fd815d;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s\\\": dial tcp 192.168.130.11:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf"
Mar 12 13:38:15 crc kubenswrapper[4125]: I0312 13:38:15.887651 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:15 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:15 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:15 crc kubenswrapper[4125]: I0312 13:38:15.887765 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:16 crc kubenswrapper[4125]: I0312 13:38:16.624621 4125 generic.go:334] "Generic (PLEG): container finished" podID="6268b7fe-8910-4505-b404-6f1df638105c" containerID="903cc09b29eb6b57a4de9a646e0d2d20b91d23eac8fd8cd6470da91e14b35e89" exitCode=0
Mar 12 13:38:16 crc kubenswrapper[4125]: I0312 13:38:16.625100 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerDied","Data":"903cc09b29eb6b57a4de9a646e0d2d20b91d23eac8fd8cd6470da91e14b35e89"}
Mar 12 13:38:16 crc kubenswrapper[4125]: I0312 13:38:16.865958 4125 generic.go:334] "Generic (PLEG): container finished" podID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerID="9c2dc36f0c3ee44e1f4c7d09861313a36668b4d06de91c2b5855b11f060bbb11" exitCode=0
Mar 12 13:38:16 crc kubenswrapper[4125]: I0312 13:38:16.866111 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" event={"ID":"fd34258c-0a6c-44b0-ba64-b411ac6bad46","Type":"ContainerDied","Data":"9c2dc36f0c3ee44e1f4c7d09861313a36668b4d06de91c2b5855b11f060bbb11"}
Mar 12 13:38:16 crc kubenswrapper[4125]: I0312 13:38:16.886602 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:16 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:16 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:16 crc kubenswrapper[4125]: I0312 13:38:16.886697 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.076718 4125 scope.go:117] "RemoveContainer" containerID="228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12"
Mar 12 13:38:17 crc kubenswrapper[4125]: E0312 13:38:17.077216 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.078145 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.078797 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.079719 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.080430 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.081572 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.082689 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.083581 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.084802 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.085515 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.086899 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.089535 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.889125 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:38:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:38:17 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:38:17 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.891216 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.891347 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.893481 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"6227ee4a8496f1cb024665458d33453ee5216601cd460a108dc527e674a4a58b"} pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" containerMessage="Container router failed startup probe, will be restarted"
Mar 12 13:38:17 crc kubenswrapper[4125]: I0312 13:38:17.893536 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" containerID="cri-o://6227ee4a8496f1cb024665458d33453ee5216601cd460a108dc527e674a4a58b" gracePeriod=3600
Mar 12 13:38:18 crc kubenswrapper[4125]: E0312 13:38:18.811112 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.812648 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.816267 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.824499 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.827324 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.829937 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.829955 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.832455 4125 generic.go:334] "Generic (PLEG): container finished" podID="48128e8d38b5cbcd2691da698bd9cac3" containerID="00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2" exitCode=0
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.833328 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerDied","Data":"00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2"}
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.834731 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.834749 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445"
Mar 12 13:38:19 crc kubenswrapper[4125]: E0312 13:38:19.837255 4125 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.837859 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.838777 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.842084 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.843239 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.846088 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.846907 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.848068 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.848776 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.849529 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.850469 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:19 crc kubenswrapper[4125]: I0312 13:38:19.851736 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:20 crc kubenswrapper[4125]: I0312 13:38:20.104040 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerStarted","Data":"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"}
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.028933 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.030750 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.031982 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.032633 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.033647 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.035271 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.036111 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.036907 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.037386 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.038391 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:22 crc kubenswrapper[4125]: I0312 13:38:22.039699 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.359106 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_installer-7-crc_ba116478-01f2-47d9-8b88-9db94f1478e3/installer/0.log"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.359240 4125 generic.go:334] "Generic (PLEG): container finished" podID="ba116478-01f2-47d9-8b88-9db94f1478e3" containerID="3720ac3cffdd9fcb2d6fefed93776a8424b4a6f80e62440cd3007049b0610069" exitCode=1
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.359282 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-7-crc" event={"ID":"ba116478-01f2-47d9-8b88-9db94f1478e3","Type":"ContainerDied","Data":"3720ac3cffdd9fcb2d6fefed93776a8424b4a6f80e62440cd3007049b0610069"}
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.361134 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.362084 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.363090 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.364938 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.366016 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.367331 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.368052 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.368774 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.369855 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.371291 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.372949 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.373863 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.598060 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body=
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.598199 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.714246 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body=
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.714371 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.847553 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.847644 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.957747 4125 patch_prober.go:28] interesting pod/image-registry-7dc8587b5-4h2pb container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.57:5000/healthz\": dial tcp 10.217.0.57:5000: i/o timeout" start-of-body=
Mar 12 13:38:23 crc kubenswrapper[4125]: I0312 13:38:23.958623 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.57:5000/healthz\": dial tcp 10.217.0.57:5000: i/o timeout"
Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.085569 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.085895 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.085974 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.087059
4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.087118 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.087992 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="console-operator" containerStatusID={"Type":"cri-o","ID":"c6d10a00de1ac306e3b2905f7c86dca8567df9d41f149845495c032671f7caf6"} pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" containerMessage="Container console-operator failed liveness probe, will be restarted" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.088060 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" containerID="cri-o://c6d10a00de1ac306e3b2905f7c86dca8567df9d41f149845495c032671f7caf6" gracePeriod=30 Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.151883 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": read tcp 10.217.0.2:45154->10.217.0.62:8443: read: connection reset by peer" start-of-body= Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.151990 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": read tcp 10.217.0.2:45154->10.217.0.62:8443: read: connection reset by peer" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.534717 4125 generic.go:334] "Generic (PLEG): container finished" podID="71af81a9-7d43-49b2-9287-c375900aa905" containerID="fad9cea35e0ebc15a97c9f69995245b3677742d9f62c2632e4840da1eef2d0a3" exitCode=0 Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.534913 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerDied","Data":"fad9cea35e0ebc15a97c9f69995245b3677742d9f62c2632e4840da1eef2d0a3"} Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.539042 4125 scope.go:117] "RemoveContainer" containerID="fad9cea35e0ebc15a97c9f69995245b3677742d9f62c2632e4840da1eef2d0a3" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.539225 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection 
refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.542245 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.543371 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.544078 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.546271 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.548879 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.549671 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.550306 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.551212 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.551977 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.552652 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.553913 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:24 crc kubenswrapper[4125]: I0312 13:38:24.555431 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:25 crc kubenswrapper[4125]: E0312 13:38:25.071627 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:38:25 crc kubenswrapper[4125]: I0312 13:38:25.701255 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/1.log" Mar 12 13:38:25 crc kubenswrapper[4125]: I0312 13:38:25.722088 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/0.log" Mar 12 13:38:25 crc kubenswrapper[4125]: I0312 13:38:25.722245 4125 generic.go:334] "Generic (PLEG): container finished" podID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerID="c6d10a00de1ac306e3b2905f7c86dca8567df9d41f149845495c032671f7caf6" exitCode=255 Mar 12 13:38:25 crc 
kubenswrapper[4125]: I0312 13:38:25.722289 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerDied","Data":"c6d10a00de1ac306e3b2905f7c86dca8567df9d41f149845495c032671f7caf6"} Mar 12 13:38:25 crc kubenswrapper[4125]: E0312 13:38:25.814222 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:38:29 crc kubenswrapper[4125]: E0312 13:38:29.960454 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:29 crc kubenswrapper[4125]: E0312 13:38:29.962521 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:29 crc kubenswrapper[4125]: E0312 13:38:29.964411 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:29 crc kubenswrapper[4125]: E0312 13:38:29.966769 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:29 crc kubenswrapper[4125]: E0312 13:38:29.967993 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:29 crc kubenswrapper[4125]: E0312 13:38:29.968012 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:38:30 crc kubenswrapper[4125]: I0312 13:38:30.029065 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:38:30 crc kubenswrapper[4125]: I0312 13:38:30.030687 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.432907 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" status="Running" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.433070 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Pending" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.433135 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.433193 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.433260 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.433288 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Pending" Mar 12 13:38:31 crc kubenswrapper[4125]: I0312 13:38:31.742259 4125 scope.go:117] "RemoveContainer" containerID="462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.029603 4125 scope.go:117] "RemoveContainer" containerID="228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.031408 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.032125 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.032752 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.033554 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.041899 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.043085 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.044579 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.045605 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.046879 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.048074 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.049985 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.052632 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: I0312 13:38:32.053939 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:32 crc kubenswrapper[4125]: E0312 13:38:32.817191 4125 controller.go:145] "Failed to ensure lease exists, 
will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.086804 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.087054 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.598333 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.598471 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.713685 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.713795 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.848032 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.848238 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.958442 4125 patch_prober.go:28] interesting pod/image-registry-7dc8587b5-4h2pb container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.57:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:38:33 crc kubenswrapper[4125]: I0312 13:38:33.958523 4125 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.57:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:38:35 crc kubenswrapper[4125]: E0312 13:38:35.074520 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.293641 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.299130 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.300589 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.300641 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.303325 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.305044 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.307324 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.308772 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.310082 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.311683 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.313993 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.315339 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.316672 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.317478 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.318524 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.319497 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.320548 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.321305 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.322114 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.322962 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.323552 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.324231 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.326332 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.328064 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.329908 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.331215 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.333806 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.335207 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.336948 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:35 crc 
kubenswrapper[4125]: I0312 13:38:35.433648 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7qlz\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-kube-api-access-m7qlz\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.433760 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-bound-sa-token\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.433887 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/410dbf23-e4f3-4307-910c-ad0a079c33e2-kubelet-dir\") pod \"410dbf23-e4f3-4307-910c-ad0a079c33e2\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.433930 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd34258c-0a6c-44b0-ba64-b411ac6bad46-installation-pull-secrets\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.434255 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.434310 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/410dbf23-e4f3-4307-910c-ad0a079c33e2-kube-api-access\") pod \"410dbf23-e4f3-4307-910c-ad0a079c33e2\" (UID: \"410dbf23-e4f3-4307-910c-ad0a079c33e2\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.434357 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-tls\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.434382 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-trusted-ca\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.434417 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-certificates\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.434573 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd34258c-0a6c-44b0-ba64-b411ac6bad46-ca-trust-extracted\") pod \"fd34258c-0a6c-44b0-ba64-b411ac6bad46\" (UID: 
\"fd34258c-0a6c-44b0-ba64-b411ac6bad46\") " Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.436465 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd34258c-0a6c-44b0-ba64-b411ac6bad46-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.438289 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/410dbf23-e4f3-4307-910c-ad0a079c33e2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "410dbf23-e4f3-4307-910c-ad0a079c33e2" (UID: "410dbf23-e4f3-4307-910c-ad0a079c33e2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.439020 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.440797 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.455749 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd34258c-0a6c-44b0-ba64-b411ac6bad46-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.467509 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-kube-api-access-m7qlz" (OuterVolumeSpecName: "kube-api-access-m7qlz") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "kube-api-access-m7qlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.469774 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.474239 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). 
InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.484503 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/410dbf23-e4f3-4307-910c-ad0a079c33e2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "410dbf23-e4f3-4307-910c-ad0a079c33e2" (UID: "410dbf23-e4f3-4307-910c-ad0a079c33e2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.505990 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (OuterVolumeSpecName: "registry-storage") pod "fd34258c-0a6c-44b0-ba64-b411ac6bad46" (UID: "fd34258c-0a6c-44b0-ba64-b411ac6bad46"). InnerVolumeSpecName "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97". PluginName "kubernetes.io/csi", VolumeGidValue "" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.536961 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/410dbf23-e4f3-4307-910c-ad0a079c33e2-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537029 4125 reconciler_common.go:300] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-tls\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537045 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-trusted-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537065 4125 reconciler_common.go:300] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd34258c-0a6c-44b0-ba64-b411ac6bad46-registry-certificates\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537076 4125 reconciler_common.go:300] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd34258c-0a6c-44b0-ba64-b411ac6bad46-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537088 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-m7qlz\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-kube-api-access-m7qlz\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537099 4125 reconciler_common.go:300] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd34258c-0a6c-44b0-ba64-b411ac6bad46-bound-sa-token\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537110 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/410dbf23-e4f3-4307-910c-ad0a079c33e2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:35 crc kubenswrapper[4125]: I0312 13:38:35.537122 4125 reconciler_common.go:300] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd34258c-0a6c-44b0-ba64-b411ac6bad46-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.823361 4125 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.824263 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" event={"ID":"fd34258c-0a6c-44b0-ba64-b411ac6bad46","Type":"ContainerDied","Data":"4061b35f86b8b4e424f10f00863af2ce515a25eb291fe51e62419db7da9538e0"} Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.830335 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.831423 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.835662 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.836749 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.844607 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.845607 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.846490 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.846595 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/revision-pruner-10-crc" event={"ID":"410dbf23-e4f3-4307-910c-ad0a079c33e2","Type":"ContainerDied","Data":"dd1c832419b5b04fba347b193c61e0cfe421961c8ceb49df83b3c5b58fdc5c38"} Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.846643 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd1c832419b5b04fba347b193c61e0cfe421961c8ceb49df83b3c5b58fdc5c38" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.846717 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-10-crc" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.847268 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.853483 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.854786 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.856647 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.857542 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.859576 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.861221 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.865573 4125 
status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.866686 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.868600 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.870504 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.872666 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.874096 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.877701 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.879901 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.883048 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: 
connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.883949 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.885014 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.885883 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.887121 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.890254 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.890948 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.891457 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.893693 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.895108 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.896332 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.897334 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.899276 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.900062 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.901438 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.902135 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.903281 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.904050 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 
Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.904688 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:36 crc kubenswrapper[4125]: I0312 13:38:36.905389 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.862872 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-network-operator_network-operator-767c585db5-zd56b_cc291782-27d2-4a74-af79-c7dcb31535d2/network-operator/1.log"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.865443 4125 generic.go:334] "Generic (PLEG): container finished" podID="cc291782-27d2-4a74-af79-c7dcb31535d2" containerID="9e714039139b1c5c2bfce080a2f7e5a156823333dd11f32400eeaed832816a11" exitCode=255
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.865567 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerDied","Data":"9e714039139b1c5c2bfce080a2f7e5a156823333dd11f32400eeaed832816a11"}
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.866682 4125 scope.go:117] "RemoveContainer" containerID="9e714039139b1c5c2bfce080a2f7e5a156823333dd11f32400eeaed832816a11"
Mar 12 13:38:37 crc kubenswrapper[4125]: E0312 13:38:37.868025 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=network-operator pod=network-operator-767c585db5-zd56b_openshift-network-operator(cc291782-27d2-4a74-af79-c7dcb31535d2)\"" pod="openshift-network-operator/network-operator-767c585db5-zd56b" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.869233 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.870222 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.871134 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.872423 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.876370 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.876993 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.877467 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.877970 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.878962 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.879583 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.880135 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.880937 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.882243 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:37 crc kubenswrapper[4125]: I0312 13:38:37.883371 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.878693 4125 generic.go:334] "Generic (PLEG): container finished" podID="b54e8941-2fc4-432a-9e51-39684df9089e" containerID="51a0ed51573e54783eaec0b562a7d00b746823f3e4730c5f84cee47fb9d258c7" exitCode=0
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.878755 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" event={"ID":"b54e8941-2fc4-432a-9e51-39684df9089e","Type":"ContainerDied","Data":"51a0ed51573e54783eaec0b562a7d00b746823f3e4730c5f84cee47fb9d258c7"}
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.879392 4125 scope.go:117] "RemoveContainer" containerID="51a0ed51573e54783eaec0b562a7d00b746823f3e4730c5f84cee47fb9d258c7"
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.882363 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.883720 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.884546 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.885075 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.885763 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.886603 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.887423 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.888076 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.889305 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.890276 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.891079 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.892204 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.893339 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.894744 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:38 crc kubenswrapper[4125]: I0312 13:38:38.895652 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.819053 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.984773 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.985576 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.986404 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.987267 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.988076 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:39 crc kubenswrapper[4125]: E0312 13:38:39.988093 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.630799 4125 scope.go:117] "RemoveContainer" containerID="a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d" Mar 12 13:38:40 crc kubenswrapper[4125]: E0312 13:38:40.631542 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d\": container with ID starting with a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d not found: ID does not exist" containerID="a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.631621 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d"} err="failed to get container status \"a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d\": rpc error: code = NotFound desc = could not find container \"a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d\": container with ID starting with a2a6a58b02b4a9ce82fbfd4c92fb1e146afcf27d5e0119cbee830ed57a732f3d not found: ID does not exist" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.631642 4125 scope.go:117] "RemoveContainer" containerID="331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.725690 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_installer-7-crc_ba116478-01f2-47d9-8b88-9db94f1478e3/installer/0.log" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.725779 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-7-crc" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.727487 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.728457 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.729452 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.730350 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.731232 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc 
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.732341 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.733360 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.735032 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.735617 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.736667 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.737614 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.739349 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.740040 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.740545 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.741067 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.760045 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-kubelet-dir\") pod \"ba116478-01f2-47d9-8b88-9db94f1478e3\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") "
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.760153 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-var-lock\") pod \"ba116478-01f2-47d9-8b88-9db94f1478e3\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") "
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.760230 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba116478-01f2-47d9-8b88-9db94f1478e3-kube-api-access\") pod \"ba116478-01f2-47d9-8b88-9db94f1478e3\" (UID: \"ba116478-01f2-47d9-8b88-9db94f1478e3\") "
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.760512 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ba116478-01f2-47d9-8b88-9db94f1478e3" (UID: "ba116478-01f2-47d9-8b88-9db94f1478e3"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.760979 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-var-lock" (OuterVolumeSpecName: "var-lock") pod "ba116478-01f2-47d9-8b88-9db94f1478e3" (UID: "ba116478-01f2-47d9-8b88-9db94f1478e3"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.780300 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba116478-01f2-47d9-8b88-9db94f1478e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ba116478-01f2-47d9-8b88-9db94f1478e3" (UID: "ba116478-01f2-47d9-8b88-9db94f1478e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.862133 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-kubelet-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.862188 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ba116478-01f2-47d9-8b88-9db94f1478e3-var-lock\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.862208 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba116478-01f2-47d9-8b88-9db94f1478e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.904366 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_installer-7-crc_ba116478-01f2-47d9-8b88-9db94f1478e3/installer/0.log"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.904469 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-7-crc" event={"ID":"ba116478-01f2-47d9-8b88-9db94f1478e3","Type":"ContainerDied","Data":"c9bc6e6741a6359131ee7e1dcdf9c466f61a3de0e373186c1523b015b1ed4390"}
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.904559 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9bc6e6741a6359131ee7e1dcdf9c466f61a3de0e373186c1523b015b1ed4390"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.904628 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-7-crc"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.906272 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.907233 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.908115 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.908745 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.912511 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.918247 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.924303 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.927698 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.929973 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.931724 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.934443 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.935729 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.938060 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.939389 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.940413 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.941582 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.944286 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.946754 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.948290 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.950055 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.950785 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.952389 4125 status_manager.go:853] "Failed to get status 
for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.953196 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.954204 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.955007 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.956651 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.957456 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.958617 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.960051 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:40 crc kubenswrapper[4125]: I0312 13:38:40.961413 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.028469 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.030244 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.031289 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.032530 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.034231 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.035205 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.036252 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.037116 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 
13:38:42.037960 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.038916 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.039603 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.040262 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.041339 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.042219 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.042761 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.117079 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/0.log" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.117225 4125 generic.go:334] "Generic (PLEG): container finished" podID="7d51f445-054a-4e4f-a67b-a828f5a32511" containerID="c47ce1b61b78f947bbf881c4500564865b677f7ac60916f2651215a08d905da4" exitCode=1 Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.117264 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerDied","Data":"c47ce1b61b78f947bbf881c4500564865b677f7ac60916f2651215a08d905da4"} Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.117941 4125 scope.go:117] "RemoveContainer" containerID="c47ce1b61b78f947bbf881c4500564865b677f7ac60916f2651215a08d905da4" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.119270 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.120262 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.120965 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.126447 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.127555 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.128607 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.130157 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.132725 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.134014 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.135704 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.137212 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.138328 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.139129 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.139886 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.140607 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:42 crc kubenswrapper[4125]: I0312 13:38:42.141437 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 
192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.085931 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.086091 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.159131 4125 generic.go:334] "Generic (PLEG): container finished" podID="0f394926-bdb9-425c-b36e-264d7fd34550" containerID="5806024690eae8f3f8c98ac80e4e73766fd80a88dd30ae7e0af35a15980bca6d" exitCode=0 Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.159280 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" event={"ID":"0f394926-bdb9-425c-b36e-264d7fd34550","Type":"ContainerDied","Data":"5806024690eae8f3f8c98ac80e4e73766fd80a88dd30ae7e0af35a15980bca6d"} Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.160021 4125 scope.go:117] "RemoveContainer" containerID="5806024690eae8f3f8c98ac80e4e73766fd80a88dd30ae7e0af35a15980bca6d" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.161584 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.163250 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.164527 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.165348 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.166511 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.167426 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.168331 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.169275 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.170092 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.170969 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.172101 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.173195 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.174895 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial 
tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.176389 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.180411 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.181275 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.182159 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.599012 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.599106 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.714260 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.714433 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.851644 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:38:43 crc kubenswrapper[4125]: I0312 13:38:43.851752 4125 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: E0312 13:38:45.083061 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.496921 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-7xghp_51a02bbf-2d40-4f84-868a-d399ea18a846/approver/0.log" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.498801 4125 generic.go:334] "Generic (PLEG): container finished" podID="51a02bbf-2d40-4f84-868a-d399ea18a846" containerID="16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086" exitCode=1 Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.498956 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerDied","Data":"16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086"} Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.500135 4125 scope.go:117] "RemoveContainer" containerID="16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.501718 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.503253 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.504702 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.506453 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.507547 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.508453 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.509157 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.509724 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.510553 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.511637 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.512506 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial 
tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.513758 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.514377 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.514946 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.515561 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.516531 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.517289 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:45 crc kubenswrapper[4125]: I0312 13:38:45.517986 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:46 crc kubenswrapper[4125]: I0312 13:38:46.516233 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-apiserver_apiserver-67cbf64bc9-mtx25_23eb88d6-6aea-4542-a2b9-8f3fd106b4ab/openshift-apiserver/0.log" Mar 12 13:38:46 crc kubenswrapper[4125]: I0312 13:38:46.516932 4125 generic.go:334] "Generic (PLEG): container finished" podID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" containerID="e626fe31e9fc0d6f2e34cae2f75ebd1df96daffc967397efe4465fb73926e0dd" exitCode=137 Mar 12 13:38:46 crc 
kubenswrapper[4125]: E0312 13:38:46.821303 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:38:48 crc kubenswrapper[4125]: I0312 13:38:48.967484 4125 scope.go:117] "RemoveContainer" containerID="1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.026025 4125 scope.go:117] "RemoveContainer" containerID="9e714039139b1c5c2bfce080a2f7e5a156823333dd11f32400eeaed832816a11" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.035936 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.038706 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.040933 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.042115 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.043204 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.044358 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.066692 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 
192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.068733 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.084879 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.087553 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.091045 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.091668 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.092962 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.094548 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.095707 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.096679 4125 status_manager.go:853] "Failed to get status for 
pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.098608 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.101116 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.636962 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_53c1db1508241fbac1bedf9130341ffe/kube-apiserver-cert-syncer/0.log" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.644345 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.645116 4125 generic.go:334] "Generic (PLEG): container finished" podID="94e8a39ea660d88d01c6db5ba5e6d884" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd" exitCode=255 Mar 12 13:38:49 crc kubenswrapper[4125]: I0312 13:38:49.645196 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerDied","Data":"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"} Mar 12 13:38:50 crc kubenswrapper[4125]: E0312 13:38:50.160428 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:50 crc kubenswrapper[4125]: E0312 13:38:50.161226 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:50 crc kubenswrapper[4125]: E0312 13:38:50.161801 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:50 crc kubenswrapper[4125]: E0312 13:38:50.162659 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:50 crc kubenswrapper[4125]: E0312 13:38:50.163871 4125 
kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:50 crc kubenswrapper[4125]: E0312 13:38:50.163894 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.031329 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.033054 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.034154 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.035017 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.035604 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.037940 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.040274 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.041314 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.042331 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.043416 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.044742 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.046023 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.046956 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.047749 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.048658 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.049601 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 
192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.050960 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:52 crc kubenswrapper[4125]: I0312 13:38:52.052308 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:53 crc kubenswrapper[4125]: E0312 13:38:53.085602 4125 desired_state_of_world_populator.go:320] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 192.168.130.11:6443: connect: connection refused" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" volumeName="registry-storage" Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.086723 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.086803 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.598556 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.598887 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.714121 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.714259 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 
10.217.0.66:8080: connect: connection refused" Mar 12 13:38:53 crc kubenswrapper[4125]: E0312 13:38:53.823638 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.847674 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:38:53 crc kubenswrapper[4125]: I0312 13:38:53.847760 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:38:55 crc kubenswrapper[4125]: E0312 13:38:55.085789 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.720745 4125 generic.go:334] "Generic (PLEG): container finished" podID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" containerID="81d2790dd791bb892de21e378fb0d17b01aa692a849c2c791acc6a67a01144f8" exitCode=0 Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.720850 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" event={"ID":"ed024e5d-8fc2-4c22-803d-73f3c9795f19","Type":"ContainerDied","Data":"81d2790dd791bb892de21e378fb0d17b01aa692a849c2c791acc6a67a01144f8"} Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.721508 4125 scope.go:117] "RemoveContainer" containerID="81d2790dd791bb892de21e378fb0d17b01aa692a849c2c791acc6a67a01144f8" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.723528 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.724752 4125 
status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.726059 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.727252 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.728636 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.730002 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.731388 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.732720 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.734294 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.735681 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.736604 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.737732 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.739108 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.740119 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.741490 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.742551 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.743652 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.744707 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:38:57 crc kubenswrapper[4125]: I0312 13:38:57.745634 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.229444 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.230966 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.232408 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.233570 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.234555 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.234605 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:39:00 crc kubenswrapper[4125]: E0312 13:39:00.825987 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.747039 4125 generic.go:334] "Generic (PLEG): container finished" podID="6d67253e-2acd-4bc1-8185-793587da4f17" containerID="beeb9bf44af1c988b8255931520dd89196509fb76414f15d8ad2cb327fe6aeb4" exitCode=0 Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.747193 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" event={"ID":"6d67253e-2acd-4bc1-8185-793587da4f17","Type":"ContainerDied","Data":"beeb9bf44af1c988b8255931520dd89196509fb76414f15d8ad2cb327fe6aeb4"} Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.748518 4125 scope.go:117] "RemoveContainer" containerID="beeb9bf44af1c988b8255931520dd89196509fb76414f15d8ad2cb327fe6aeb4" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.750108 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" 
pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.751553 4125 status_manager.go:853] "Failed to get status for pod" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/pods/service-ca-operator-546b4f8984-pwccz\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.753167 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.754447 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.755316 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.756476 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.757430 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.758040 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.759301 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: 
connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.760397 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.761389 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.762140 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.763100 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.764041 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.765128 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.765932 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.766934 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.768143 4125 status_manager.go:853] "Failed to get status for pod" 
podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.769278 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:01 crc kubenswrapper[4125]: I0312 13:39:01.770482 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.028395 4125 status_manager.go:853] "Failed to get status for pod" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/pods/service-ca-operator-546b4f8984-pwccz\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.029619 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.035416 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.036296 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.037042 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.037794 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" 
pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.039783 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.041069 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.041794 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.042555 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.043425 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.044297 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.045059 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.045779 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: 
connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.046646 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.047705 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.048372 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.049165 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.050062 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:02 crc kubenswrapper[4125]: I0312 13:39:02.050758 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.100881 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.100978 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.598550 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe 
status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.598640 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.714124 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.714236 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.848792 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:39:03 crc kubenswrapper[4125]: I0312 13:39:03.848948 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:39:05 crc kubenswrapper[4125]: E0312 13:39:05.088363 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:39:06 crc kubenswrapper[4125]: I0312 13:39:06.819613 4125 generic.go:334] "Generic (PLEG): container finished" podID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerID="6227ee4a8496f1cb024665458d33453ee5216601cd460a108dc527e674a4a58b" exitCode=0 Mar 12 13:39:06 crc kubenswrapper[4125]: I0312 13:39:06.820052 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerDied","Data":"6227ee4a8496f1cb024665458d33453ee5216601cd460a108dc527e674a4a58b"} Mar 12 13:39:07 crc kubenswrapper[4125]: E0312 13:39:07.828325 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:39:10 crc kubenswrapper[4125]: E0312 13:39:10.457008 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:10 crc kubenswrapper[4125]: E0312 13:39:10.458652 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:10 crc kubenswrapper[4125]: E0312 13:39:10.460624 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:10 crc kubenswrapper[4125]: E0312 13:39:10.461355 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:10 crc kubenswrapper[4125]: E0312 13:39:10.462258 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:10 crc kubenswrapper[4125]: E0312 13:39:10.462294 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.028682 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.029757 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.030676 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc 
kubenswrapper[4125]: I0312 13:39:12.033254 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.034009 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.034710 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.035496 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.036243 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.037004 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.037671 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.038528 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.039354 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.040054 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.040722 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.041497 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.042256 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.042955 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.043777 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.044566 4125 status_manager.go:853] "Failed to get status for pod" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/pods/service-ca-operator-546b4f8984-pwccz\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:12 crc kubenswrapper[4125]: I0312 13:39:12.045561 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.092195 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.092302 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.408967 4125 scope.go:117] "RemoveContainer" containerID="7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.606604 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.606691 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.713881 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.714009 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.848323 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.849747 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.910996 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"3960db248f20122220f3e0fb329a258b5910cc112f4afdfd31d492837b4e1713"} Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 
13:39:13.914096 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.914337 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.914424 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.915913 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.917993 4125 status_manager.go:853] "Failed to get status for pod" podUID="6268b7fe-8910-4505-b404-6f1df638105c" pod="openshift-console/downloads-65476884b9-9wcvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-65476884b9-9wcvx\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.919394 4125 status_manager.go:853] "Failed to get status for pod" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/pods/service-ca-operator-546b4f8984-pwccz\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.920363 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.925300 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.926599 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.927865 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.929086 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.930751 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.931874 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.951461 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.952508 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.958866 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.963554 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.964645 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc 
kubenswrapper[4125]: I0312 13:39:13.966631 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.969110 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.970607 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.973979 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.979442 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:13 crc kubenswrapper[4125]: I0312 13:39:13.981068 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: E0312 13:39:14.831299 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="7s" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.938527 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.940399 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerStarted","Data":"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"} Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.949531 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerStarted","Data":"254ad9a98529033932dc1b9c446efaa247d53e9d673f4d28116134c8c0e44635"} Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.956455 4125 status_manager.go:853] "Failed to get status for pod" podUID="6268b7fe-8910-4505-b404-6f1df638105c" pod="openshift-console/downloads-65476884b9-9wcvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-65476884b9-9wcvx\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.959084 4125 status_manager.go:853] "Failed to get status for pod" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/pods/service-ca-operator-546b4f8984-pwccz\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.960567 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerStarted","Data":"79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2"} Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.961985 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.965406 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.967632 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.969268 4125 generic.go:334] "Generic (PLEG): container finished" podID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerID="e5e4ae2e28c6d0c65895c9e36ea602043bbd8bda8e4978a7f740eb0d6f142453" exitCode=0 Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.969380 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.969363 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" event={"ID":"dd2f98e4-4656-4c95-8c6f-5959bd9f876a","Type":"ContainerDied","Data":"e5e4ae2e28c6d0c65895c9e36ea602043bbd8bda8e4978a7f740eb0d6f142453"} Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.970405 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.970927 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.972039 4125 scope.go:117] "RemoveContainer" containerID="e5e4ae2e28c6d0c65895c9e36ea602043bbd8bda8e4978a7f740eb0d6f142453" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.972337 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.974648 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.976294 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.978512 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:14 crc kubenswrapper[4125]: I0312 13:39:14.992613 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.002602 4125 status_manager.go:853] "Failed 
to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.005792 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.007538 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.008599 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.009790 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.011108 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.012255 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.014137 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.015563 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.017688 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.018994 4125 status_manager.go:853] "Failed to get status for pod" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/pods/openshift-config-operator-77658b5b66-dq5sc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.020072 4125 status_manager.go:853] "Failed to get status for pod" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" pod="openshift-network-operator/network-operator-767c585db5-zd56b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/pods/network-operator-767c585db5-zd56b\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.021072 4125 status_manager.go:853] "Failed to get status for pod" podUID="71af81a9-7d43-49b2-9287-c375900aa905" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.022074 4125 status_manager.go:853] "Failed to get status for pod" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-584c5db66f-kcmc9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.023088 4125 status_manager.go:853] "Failed to get status for pod" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" pod="openshift-kube-scheduler/installer-7-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/installer-7-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.023999 4125 status_manager.go:853] "Failed to get status for pod" podUID="6268b7fe-8910-4505-b404-6f1df638105c" pod="openshift-console/downloads-65476884b9-9wcvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-65476884b9-9wcvx\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.025069 4125 status_manager.go:853] "Failed to get status for pod" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/pods/service-ca-operator-546b4f8984-pwccz\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.026924 4125 status_manager.go:853] "Failed to get status for pod" podUID="94e8a39ea660d88d01c6db5ba5e6d884" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.028483 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.029658 4125 status_manager.go:853] "Failed to get status for pod" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/pods/openshift-controller-manager-operator-7978d7d7f6-2nt8z\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.030708 4125 status_manager.go:853] "Failed to get status for pod" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" pod="openshift-network-node-identity/network-node-identity-7xghp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-7xghp\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.031611 4125 status_manager.go:853] "Failed to get status for pod" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/pods/ingress-operator-7d46d5bb6d-rrg6t\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.033223 4125 status_manager.go:853] "Failed to get status for pod" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" pod="openshift-apiserver/apiserver-67cbf64bc9-fq4m9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/pods/apiserver-67cbf64bc9-fq4m9\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.034472 4125 status_manager.go:853] "Failed to get status for pod" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.035555 4125 status_manager.go:853] "Failed to get status for pod" podUID="35c093da-a468-44a1-8ff0-09b09268828c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 
13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.036609 4125 status_manager.go:853] "Failed to get status for pod" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" pod="openshift-image-registry/image-registry-7dc8587b5-4h2pb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-7dc8587b5-4h2pb\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.037872 4125 status_manager.go:853] "Failed to get status for pod" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" pod="openshift-kube-controller-manager/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/installer-9-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.038998 4125 status_manager.go:853] "Failed to get status for pod" podUID="bf055e84f32193b9c1c21b0c34a61f01" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.039909 4125 status_manager.go:853] "Failed to get status for pod" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/pods/kube-apiserver-operator-78d54458c4-sc8h7\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.040531 4125 status_manager.go:853] "Failed to get status for pod" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" pod="openshift-image-registry/image-registry-86594ff457-6b77x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-86594ff457-6b77x\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.041231 4125 status_manager.go:853] "Failed to get status for pod" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/revision-pruner-10-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.041912 4125 status_manager.go:853] "Failed to get status for pod" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/cluster-image-registry-operator-7769bd8d7d-q5cvv\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:39:15 crc kubenswrapper[4125]: E0312 13:39:15.099506 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-operator/events/network-operator-767c585db5-zd56b.189c1b0466b4d6e0\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{network-operator-767c585db5-zd56b.189c1b0466b4d6e0 openshift-network-operator 25330 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-operator,Name:network-operator-767c585db5-zd56b,UID:cc291782-27d2-4a74-af79-c7dcb31535d2,APIVersion:v1,ResourceVersion:23773,FieldPath:spec.containers{network-operator},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:01d2cb35b80655d65c4b64e2298483814e2abac94eef5497089ee1e03234f4fc\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:27:38 +0000 UTC,LastTimestamp:2026-03-12 13:38:00.642485817 +0000 UTC m=+1050.965871706,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.930389 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.932403 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.980568 4125 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 12 13:39:15 crc kubenswrapper[4125]: I0312 13:39:15.980732 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.086236 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.087076 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.597804 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.597940 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.727277 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-65476884b9-9wcvx"
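
The paired patch_prober.go / prober.go entries above are single probe attempts reported twice: what looks like an OpenShift carry-patch (patch_prober.go) dumps the raw probe output for selected "interesting" pods, and the upstream prober records the verdict. Each of these readiness and startup probes is just an HTTP GET against the pod IP; while nothing is listening yet, the TCP dial is refused and the kubelet keeps logging probeResult="failure", and once the server comes up the "SyncLoop (probe)" entry flips, as it does for the downloads pod at 13:39:23.727277. A minimal sketch of such a probe follows; it is not the kubelet's prober, and the URL and timeout are assumptions chosen to mirror the logged target:

// probe_sketch.go - NOT kubelet code; a minimal model of an HTTP
// readiness/startup probe. Endpoint and timeout are assumptions that
// mirror the "Get http://10.217.0.66:8080/" failures logged above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeHTTP succeeds only on a 2xx/3xx response; any dial error (e.g.
// "connect: connection refused") or a >=400 status counts as a failure,
// which is how the records above end up with probeResult="failure".
func probeHTTP(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Against a port with no listener this prints the same
	// "dial tcp ...: connect: connection refused" text seen above.
	if err := probeHTTP("http://10.217.0.66:8080/", time.Second); err != nil {
		fmt.Println(err)
	}
}
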
Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.847678 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:39:23 crc kubenswrapper[4125]: I0312 13:39:23.847781 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:39:24 crc kubenswrapper[4125]: I0312 13:39:24.071438 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/0.log" Mar 12 13:39:24 crc kubenswrapper[4125]: I0312 13:39:24.071544 4125 generic.go:334] "Generic (PLEG): container finished" podID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerID="2e4460ec3c54f3b8ff53d17aae59a9884a7b46f8168d7c7ea0ebcb59478846ce" exitCode=0 Mar 12 13:39:24 crc kubenswrapper[4125]: I0312 13:39:24.071584 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerDied","Data":"2e4460ec3c54f3b8ff53d17aae59a9884a7b46f8168d7c7ea0ebcb59478846ce"} Mar 12 13:39:24 crc kubenswrapper[4125]: I0312 13:39:24.072500 4125 scope.go:117] "RemoveContainer" containerID="2e4460ec3c54f3b8ff53d17aae59a9884a7b46f8168d7c7ea0ebcb59478846ce" Mar 12 13:39:27 crc kubenswrapper[4125]: E0312 13:39:27.663438 4125 reflector.go:147] object-"openshift-apiserver"/"openshift-service-ca.crt": Failed to watch *v1.ConfigMap: unknown (get configmaps) Mar 12 13:39:27 crc kubenswrapper[4125]: E0312 13:39:27.813311 4125 reflector.go:147] object-"openshift-apiserver"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: unknown (get configmaps) Mar 12 13:39:28 crc kubenswrapper[4125]: E0312 13:39:28.264534 4125 reflector.go:147] object-"openshift-apiserver"/"trusted-ca-bundle": Failed to watch *v1.ConfigMap: unknown (get configmaps) Mar 12 13:39:28 crc kubenswrapper[4125]: E0312 13:39:28.639667 4125 reflector.go:147] object-"openshift-apiserver"/"etcd-client": Failed to watch *v1.Secret: unknown (get secrets) Mar 12 13:39:31 crc kubenswrapper[4125]: I0312 13:39:31.433647 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Pending" Mar 12 13:39:31 crc kubenswrapper[4125]: I0312 13:39:31.433773 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:39:31 crc kubenswrapper[4125]: I0312 13:39:31.433801 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:39:31 crc kubenswrapper[4125]: I0312 13:39:31.433889 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Pending" Mar 12 13:39:31 crc kubenswrapper[4125]: I0312 13:39:31.433928 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:39:31 crc kubenswrapper[4125]: I0312 13:39:31.433960 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" status="Running" Mar 12 
13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.089003 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.089102 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.277361 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.277438 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.599588 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.600038 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.847885 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:39:33 crc kubenswrapper[4125]: I0312 13:39:33.847975 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:39:43 crc kubenswrapper[4125]: I0312 13:39:43.086623 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Mar 12 13:39:43 crc kubenswrapper[4125]: I0312 13:39:43.087284 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" Mar 12 13:39:43 crc kubenswrapper[4125]: I0312 13:39:43.605416 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: 
connection refused" start-of-body= Mar 12 13:39:43 crc kubenswrapper[4125]: I0312 13:39:43.605531 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:39:43 crc kubenswrapper[4125]: I0312 13:39:43.849112 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:39:43 crc kubenswrapper[4125]: I0312 13:39:43.849340 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:39:45 crc kubenswrapper[4125]: E0312 13:39:45.199749 4125 reflector.go:147] object-"openshift-apiserver"/"serving-cert": Failed to watch *v1.Secret: unknown (get secrets) Mar 12 13:39:45 crc kubenswrapper[4125]: I0312 13:39:45.385334 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-5d9b995f6b-fcgd7_71af81a9-7d43-49b2-9287-c375900aa905/kube-scheduler-operator-container/1.log" Mar 12 13:39:45 crc kubenswrapper[4125]: I0312 13:39:45.387099 4125 generic.go:334] "Generic (PLEG): container finished" podID="71af81a9-7d43-49b2-9287-c375900aa905" containerID="254ad9a98529033932dc1b9c446efaa247d53e9d673f4d28116134c8c0e44635" exitCode=255 Mar 12 13:39:45 crc kubenswrapper[4125]: I0312 13:39:45.387171 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerDied","Data":"254ad9a98529033932dc1b9c446efaa247d53e9d673f4d28116134c8c0e44635"} Mar 12 13:39:45 crc kubenswrapper[4125]: I0312 13:39:45.388092 4125 scope.go:117] "RemoveContainer" containerID="254ad9a98529033932dc1b9c446efaa247d53e9d673f4d28116134c8c0e44635" Mar 12 13:39:45 crc kubenswrapper[4125]: E0312 13:39:45.388774 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler-operator-container\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-scheduler-operator-container pod=openshift-kube-scheduler-operator-5d9b995f6b-fcgd7_openshift-kube-scheduler-operator(71af81a9-7d43-49b2-9287-c375900aa905)\"" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.319587 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-apiserver_apiserver-67cbf64bc9-mtx25_23eb88d6-6aea-4542-a2b9-8f3fd106b4ab/openshift-apiserver/0.log" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.322956 4125 util.go:48] "No ready sandbox for pod can be found. 
Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.319587 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-apiserver_apiserver-67cbf64bc9-mtx25_23eb88d6-6aea-4542-a2b9-8f3fd106b4ab/openshift-apiserver/0.log" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.322956 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.444788 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-apiserver_apiserver-67cbf64bc9-mtx25_23eb88d6-6aea-4542-a2b9-8f3fd106b4ab/openshift-apiserver/0.log" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.445917 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-67cbf64bc9-mtx25" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.471605 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-node-pullsecrets\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.471755 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.471797 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.471913 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.471966 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.471990 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472026 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472080 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit-dir\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472117 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472166 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472243 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") pod \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\" (UID: \"23eb88d6-6aea-4542-a2b9-8f3fd106b4ab\") " Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472286 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.472517 4125 reconciler_common.go:300] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.474149 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config" (OuterVolumeSpecName: "config") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.474260 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.474653 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.475502 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.475933 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.476333 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit" (OuterVolumeSpecName: "audit") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.484873 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.484903 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.484888 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.486267 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9" (OuterVolumeSpecName: "kube-api-access-r8qj9") pod "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" (UID: "23eb88d6-6aea-4542-a2b9-8f3fd106b4ab"). InnerVolumeSpecName "kube-api-access-r8qj9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.566357 4125 scope.go:117] "RemoveContainer" containerID="ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573715 4125 reconciler_common.go:300] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573764 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573785 4125 reconciler_common.go:300] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-client\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573798 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573854 4125 reconciler_common.go:300] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-image-import-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573873 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-r8qj9\" (UniqueName: \"kubernetes.io/projected/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-kube-api-access-r8qj9\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573883 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573893 4125 reconciler_common.go:300] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573904 4125 reconciler_common.go:300] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-audit-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.573914 4125 reconciler_common.go:300] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab-encryption-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:39:51 crc kubenswrapper[4125]: E0312 13:39:51.593962 4125 remote_runtime.go:193] "RunPodSandbox from runtime service failed" err=< Mar 12 13:39:51 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 
'ContainerID:"4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4" Netns:"/var/run/netns/c38de595-f48c-4b69-b465-f556b7d1e6e2" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 13:39:51 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Mar 12 13:39:51 crc kubenswrapper[4125]: > Mar 12 13:39:51 crc kubenswrapper[4125]: E0312 13:39:51.594025 4125 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Mar 12 13:39:51 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4" Netns:"/var/run/netns/c38de595-f48c-4b69-b465-f556b7d1e6e2" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 13:39:51 crc kubenswrapper[4125]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Mar 12 13:39:51 crc kubenswrapper[4125]: > pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:39:51 crc kubenswrapper[4125]: E0312 13:39:51.594909 4125 kuberuntime_manager.go:1172] "CreatePodSandbox for pod failed" err=< Mar 12 13:39:51 crc kubenswrapper[4125]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4" Netns:"/var/run/netns/c38de595-f48c-4b69-b465-f556b7d1e6e2" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 13:39:51 crc kubenswrapper[4125]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Mar 12 13:39:51 crc kubenswrapper[4125]: > pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:39:51 crc kubenswrapper[4125]: E0312 13:39:51.595024 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-6499cf79cf-qdfbh_openshift-authentication(b61ce6b0-a70f-42b7-9435-3d6acba81ccf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-6499cf79cf-qdfbh_openshift-authentication(b61ce6b0-a70f-42b7-9435-3d6acba81ccf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-6499cf79cf-qdfbh_openshift-authentication_b61ce6b0-a70f-42b7-9435-3d6acba81ccf_0(4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4): error adding pod openshift-authentication_oauth-openshift-6499cf79cf-qdfbh to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" 
name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4\\\" Netns:\\\"/var/run/netns/c38de595-f48c-4b69-b465-f556b7d1e6e2\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-6499cf79cf-qdfbh;K8S_POD_INFRA_CONTAINER_ID=4c769717e00f62be96f9d71b7d558b6ab9d768933903df19a5e64a56b1c3c3c4;K8S_POD_UID=b61ce6b0-a70f-42b7-9435-3d6acba81ccf\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh] networking: Multus: [openshift-authentication/oauth-openshift-6499cf79cf-qdfbh/b61ce6b0-a70f-42b7-9435-3d6acba81ccf]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-6499cf79cf-qdfbh in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-6499cf79cf-qdfbh?timeout=1m0s\\\": dial tcp 192.168.130.11:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.779751 4125 scope.go:117] "RemoveContainer" containerID="f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532" Mar 12 13:39:51 crc kubenswrapper[4125]: E0312 13:39:51.909465 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23eb88d6_6aea_4542_a2b9_8f3fd106b4ab.slice\": RecentStats: unable to find data in memory cache]" Mar 12 13:39:51 crc kubenswrapper[4125]: I0312 13:39:51.950735 4125 scope.go:117] "RemoveContainer" containerID="968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.052249 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23eb88d6-6aea-4542-a2b9-8f3fd106b4ab" path="/var/lib/kubelet/pods/23eb88d6-6aea-4542-a2b9-8f3fd106b4ab/volumes" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.204218 4125 scope.go:117] "RemoveContainer" containerID="331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.206294 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f\": container with ID starting with 331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f not found: ID does not exist" containerID="331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.206376 4125 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f"} err="failed to get container status \"331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f\": rpc error: code = NotFound desc = could not find container \"331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f\": container with ID starting with 331cd522a741132d2ba5cfd6898d2f3a51ebb7bb6ffc904640fc88f794a6b04f not found: ID does not exist" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.206392 4125 scope.go:117] "RemoveContainer" containerID="7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.206970 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a\": container with ID starting with 7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a not found: ID does not exist" containerID="7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.207046 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a"} err="failed to get container status \"7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a\": rpc error: code = NotFound desc = could not find container \"7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a\": container with ID starting with 7198e782ffd84469f5c9a20e64b955eda7314fd7a50fe925df9a1b668c44440a not found: ID does not exist" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.207092 4125 scope.go:117] "RemoveContainer" containerID="f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.210689 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532\": container with ID starting with f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532 not found: ID does not exist" containerID="f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.210728 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532"} err="failed to get container status \"f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532\": rpc error: code = NotFound desc = could not find container \"f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532\": container with ID starting with f39cc14c487919c2055acf08aad27f3ba5937cc4805361e98489e139c0a43532 not found: ID does not exist" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.210743 4125 scope.go:117] "RemoveContainer" containerID="1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.213184 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\": container with ID starting with 1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf not found: ID does not exist" 
containerID="1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.213258 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf"} err="failed to get container status \"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\": rpc error: code = NotFound desc = could not find container \"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\": container with ID starting with 1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf not found: ID does not exist" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.213272 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.214148 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\": container with ID starting with cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba not found: ID does not exist" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.214173 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"} err="failed to get container status \"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\": rpc error: code = NotFound desc = could not find container \"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\": container with ID starting with cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba not found: ID does not exist" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.214183 4125 scope.go:117] "RemoveContainer" containerID="968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.431056 4125 scope.go:117] "RemoveContainer" containerID="5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.431433 4125 remote_runtime.go:385] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-apiserver-insecure-readyz_kube-apiserver-crc_openshift-kube-apiserver_53c1db1508241fbac1bedf9130341ffe_0 in pod sandbox 697b4a85368117d1d37e0a159aa6f0d0120abacb377ede75707265f996f54fcf from index: no such id: '968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291'" containerID="968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.431478 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291"} err="rpc error: code = Unknown desc = failed to delete container k8s_kube-apiserver-insecure-readyz_kube-apiserver-crc_openshift-kube-apiserver_53c1db1508241fbac1bedf9130341ffe_0 in pod sandbox 697b4a85368117d1d37e0a159aa6f0d0120abacb377ede75707265f996f54fcf from index: no such id: '968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291'" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.431488 4125 scope.go:117] "RemoveContainer" 
containerID="462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.436410 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\": container with ID starting with 462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45 not found: ID does not exist" containerID="462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.436457 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45"} err="failed to get container status \"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\": rpc error: code = NotFound desc = could not find container \"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\": container with ID starting with 462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45 not found: ID does not exist" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.436469 4125 scope.go:117] "RemoveContainer" containerID="5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.489268 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/0.log" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.490334 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"9c3e595db3b1fe865bb35c919363f1aa729037b3064769f8a26dd8e09daa4bf4"} Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.559630 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" event={"ID":"0f394926-bdb9-425c-b36e-264d7fd34550","Type":"ContainerStarted","Data":"800c4981596d8a23ac050f28ba7aba12a608c921258584f6805b3996f4c2d014"} Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.587497 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" event={"ID":"b54e8941-2fc4-432a-9e51-39684df9089e","Type":"ContainerStarted","Data":"f24a4f61565dcc1fbe4db711caff06ad56bc0b357a56c8214532d739d8441d1b"} Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.711687 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/1.log" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.738082 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"65798658ef7013bf85ec95fa390973b92dfcdc72f2ecfa0156e34df0be1d1a43"} Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.739417 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:39:52 crc kubenswrapper[4125]: E0312 13:39:52.823275 4125 remote_runtime.go:385] 
"RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-apiserver-cert-syncer_kube-apiserver-crc_openshift-kube-apiserver_53c1db1508241fbac1bedf9130341ffe_0 in pod sandbox 697b4a85368117d1d37e0a159aa6f0d0120abacb377ede75707265f996f54fcf from index: no such id: '5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b'" containerID="5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.823523 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b"} err="rpc error: code = Unknown desc = failed to delete container k8s_kube-apiserver-cert-syncer_kube-apiserver-crc_openshift-kube-apiserver_53c1db1508241fbac1bedf9130341ffe_0 in pod sandbox 697b4a85368117d1d37e0a159aa6f0d0120abacb377ede75707265f996f54fcf from index: no such id: '5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b'" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.823546 4125 scope.go:117] "RemoveContainer" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.823650 4125 scope.go:117] "RemoveContainer" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.833876 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" event={"ID":"6d67253e-2acd-4bc1-8185-793587da4f17","Type":"ContainerStarted","Data":"3a8ae18182948d6b617a734a20a67c608bc544de4bf57a4119de1e2782bcf6d9"} Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.854735 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-7xghp_51a02bbf-2d40-4f84-868a-d399ea18a846/approver/0.log" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.855804 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"6a8a5c8335355b41fd4861cd71f0b58cc3a9115d1a5f739dbc7a8e5e82e329fc"} Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.886279 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/1.log" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.899598 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/0.log" Mar 12 13:39:52 crc kubenswrapper[4125]: I0312 13:39:52.899912 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"39ebdae230cdbbdee5d4d29f2b22052e045fb4e9bdb1ddc921c70774a4858df5"} Mar 12 13:39:53 crc kubenswrapper[4125]: E0312 13:39:53.307469 4125 remote_runtime.go:385] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-apiserver_kube-apiserver-crc_openshift-kube-apiserver_53c1db1508241fbac1bedf9130341ffe_0 in pod sandbox 697b4a85368117d1d37e0a159aa6f0d0120abacb377ede75707265f996f54fcf from 
index: no such id: '8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee'" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.310321 4125 scope.go:117] "RemoveContainer" containerID="ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408" Mar 12 13:39:53 crc kubenswrapper[4125]: E0312 13:39:53.311294 4125 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-apiserver_kube-apiserver-crc_openshift-kube-apiserver_53c1db1508241fbac1bedf9130341ffe_0 in pod sandbox 697b4a85368117d1d37e0a159aa6f0d0120abacb377ede75707265f996f54fcf from index: no such id: '8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee'" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" Mar 12 13:39:53 crc kubenswrapper[4125]: E0312 13:39:53.326588 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\": container with ID starting with ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408 not found: ID does not exist" containerID="ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.326674 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408"} err="failed to get container status \"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\": rpc error: code = NotFound desc = could not find container \"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\": container with ID starting with ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408 not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.326692 4125 scope.go:117] "RemoveContainer" containerID="1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.336401 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf"} err="failed to get container status \"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\": rpc error: code = NotFound desc = could not find container \"1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf\": container with ID starting with 1be50879152883465df5eb451914bc0e06cb9ccbc1eb820173c8e94c91eaeadf not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.336458 4125 scope.go:117] "RemoveContainer" containerID="cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.347132 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba"} err="failed to get container status \"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\": rpc error: code = NotFound desc = could not find container \"cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba\": container with ID starting with cb43122bebbce5f78ca708dbf991e32579a493e20b21e81266430acd2d89ddba not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 
13:39:53.347575 4125 scope.go:117] "RemoveContainer" containerID="968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" Mar 12 13:39:53 crc kubenswrapper[4125]: E0312 13:39:53.349055 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\": container with ID starting with 968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291 not found: ID does not exist" containerID="968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.349111 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291"} err="failed to get container status \"968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\": rpc error: code = NotFound desc = could not find container \"968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291\": container with ID starting with 968b417a75008522a2e5ac73755a44ac11b254af91c6c808a299393d3f0c9291 not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.349124 4125 scope.go:117] "RemoveContainer" containerID="462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.355629 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45"} err="failed to get container status \"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\": rpc error: code = NotFound desc = could not find container \"462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45\": container with ID starting with 462dad29efb764f880ae77c1f23b85bb8e0de2d5b3e9cfe150c94ae1ede12b45 not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.355673 4125 scope.go:117] "RemoveContainer" containerID="5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" Mar 12 13:39:53 crc kubenswrapper[4125]: E0312 13:39:53.356043 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\": container with ID starting with 5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b not found: ID does not exist" containerID="5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.356090 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b"} err="failed to get container status \"5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\": rpc error: code = NotFound desc = could not find container \"5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b\": container with ID starting with 5a36b79a0d3540ec3af07d60e18bf608f9f76e68e45405aa47e17715f17e3a3b not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.356104 4125 scope.go:117] "RemoveContainer" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" Mar 12 13:39:53 crc kubenswrapper[4125]: E0312 13:39:53.363690 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\": container with ID starting with 8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee not found: ID does not exist" containerID="8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.363789 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee"} err="failed to get container status \"8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\": rpc error: code = NotFound desc = could not find container \"8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee\": container with ID starting with 8dfca2dd66951d9196c1dd2b6e89e088e04fce424b44140a7177c34cc19d64ee not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.363935 4125 scope.go:117] "RemoveContainer" containerID="ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.365913 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408"} err="failed to get container status \"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\": rpc error: code = NotFound desc = could not find container \"ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408\": container with ID starting with ee044e4c487fc69b38fb3c08a02d94250804e918d80d431d7e4cd8c60ddea408 not found: ID does not exist" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.365952 4125 scope.go:117] "RemoveContainer" containerID="78c792e326186d5224a3a883603f4948e9553db7e6abf9de8146d154ac958b88" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.598612 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.598687 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.658034 4125 scope.go:117] "RemoveContainer" containerID="ac3dfa4ed7931e5462fcdd7627e4282ba7aa0fd1c33cc8f485d6bfc2ea90ad2b" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.847956 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.848036 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.900679 4125 patch_prober.go:28] interesting 
pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.901011 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.901709 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:39:53 crc kubenswrapper[4125]: I0312 13:39:53.984642 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" event={"ID":"dd2f98e4-4656-4c95-8c6f-5959bd9f876a","Type":"ContainerStarted","Data":"00fc1a49be6f6ce49f26a0d690991ef4f410f9799447b97d80d35ade0960dd3a"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.006060 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerStarted","Data":"ae54151489cfc34992d37ed0b8ac57e695a64170c18915ba01de6399209ce42d"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.051626 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.078370 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"ea07787c394e1c3186bac25c2eee0e2f38fc50a7ed36572d001b610121ef0def"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.091981 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.092073 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.130403 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/0.log" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.130528 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" 
event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.131791 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.135092 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.135152 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.185678 4125 scope.go:117] "RemoveContainer" containerID="94803870c39c663aac3c4df56ed06883072a310c09c31b80ac8c3f4c99915832" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.186158 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerStarted","Data":"25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.249451 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-network-operator_network-operator-767c585db5-zd56b_cc291782-27d2-4a74-af79-c7dcb31535d2/network-operator/1.log" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.250255 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerStarted","Data":"704a66f9a93dd37ad7a56d794e85a1023dbfdc028ea7a2ce3c15244cc372fc1d"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.262666 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerStarted","Data":"d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7"} Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.325765 4125 scope.go:117] "RemoveContainer" containerID="9c2dc36f0c3ee44e1f4c7d09861313a36668b4d06de91c2b5855b11f060bbb11" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.431148 4125 scope.go:117] "RemoveContainer" containerID="33070182a8af3183056bec261fa194a186f80ccef2f2ce51e6f26ee610e4ec86" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.468978 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.565869 4125 scope.go:117] "RemoveContainer" containerID="040cb9f62a36a88ec3355c60a27eaf5953d6fd9f6f7f113859993a305d3ba2e6" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.683259 4125 scope.go:117] "RemoveContainer" containerID="03986766a13ed650c14fd79f9e5b20f3fe8b23aa47c6521ddf17adfb8b570506" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.748235 4125 scope.go:117] "RemoveContainer" 
containerID="fad9cea35e0ebc15a97c9f69995245b3677742d9f62c2632e4840da1eef2d0a3" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.835796 4125 scope.go:117] "RemoveContainer" containerID="9abe5af36bb650e822fc8617d763cb1ac72e09098227d9187b59e949bcd51a26" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.883587 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.890805 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:39:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:39:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:39:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.890960 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.893444 4125 scope.go:117] "RemoveContainer" containerID="e626fe31e9fc0d6f2e34cae2f75ebd1df96daffc967397efe4465fb73926e0dd" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.901914 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.902003 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:54 crc kubenswrapper[4125]: I0312 13:39:54.965965 4125 scope.go:117] "RemoveContainer" containerID="606590d421466d5fb63038809a2ecba9accc142178b68d087c9ed02dfcf80ca8" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.101432 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.101537 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.288979 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" 
event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.302546 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerStarted","Data":"740bf76161fec26f5a24bce398a3932f4dddc3105a1392e7362ce9200176a764"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.308800 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-network-operator_network-operator-767c585db5-zd56b_cc291782-27d2-4a74-af79-c7dcb31535d2/network-operator/1.log" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.315258 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.315318 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.318978 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerStarted","Data":"bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.323669 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" event={"ID":"ed024e5d-8fc2-4c22-803d-73f3c9795f19","Type":"ContainerStarted","Data":"95b905498fc7b69a4cbcfdba75595babc1bf4a5d3b7902ad547352bd2c3bb523"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.328383 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerStarted","Data":"0f6e5f54ac6d5074ad57349154715c6834d0d6a57a215eb365926d06bde34837"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.334364 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerStarted","Data":"ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.338062 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/1.log" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.347051 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.348144 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerStarted","Data":"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.349484 4125 scope.go:117] "RemoveContainer" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.361295 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerStarted","Data":"e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.376490 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerStarted","Data":"012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc"} Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.398129 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-5d9b995f6b-fcgd7_71af81a9-7d43-49b2-9287-c375900aa905/kube-scheduler-operator-container/1.log" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.400236 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.903665 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.905800 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.911693 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:39:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:39:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:39:55 crc kubenswrapper[4125]: healthz check failed Mar 12 13:39:55 crc kubenswrapper[4125]: I0312 13:39:55.912026 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.261680 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.400114 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager 
namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.400226 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.413754 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.414960 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"94e8a39ea660d88d01c6db5ba5e6d884","Type":"ContainerStarted","Data":"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca"} Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.430972 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerStarted","Data":"b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f"} Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.433591 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.433697 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.906639 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.906742 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.989413 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:39:56 crc 
kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:39:56 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:39:56 crc kubenswrapper[4125]: healthz check failed Mar 12 13:39:56 crc kubenswrapper[4125]: I0312 13:39:56.989542 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.174177 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.174297 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.174362 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.174388 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.401297 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.401398 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.424312 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.444056 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"48128e8d38b5cbcd2691da698bd9cac3","Type":"ContainerStarted","Data":"dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4"} Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.445489 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.449354 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.451736 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.617519 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.984978 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:39:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:39:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:39:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.985348 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:39:57 crc kubenswrapper[4125]: I0312 13:39:57.991556 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.264396 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.402169 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.402281 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.403011 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.883367 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.967928 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 
500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:39:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:39:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:39:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.968036 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.987388 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Mar 12 13:39:58 crc kubenswrapper[4125]: I0312 13:39:58.994994 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.026127 4125 scope.go:117] "RemoveContainer" containerID="254ad9a98529033932dc1b9c446efaa247d53e9d673f4d28116134c8c0e44635" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.073743 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.073921 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.073949 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.235971 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.398307 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.400259 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.510650 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.655861 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.942638 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:39:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:39:59 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:39:59 crc kubenswrapper[4125]: healthz check failed Mar 12 13:39:59 crc kubenswrapper[4125]: I0312 13:39:59.942804 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.173524 4125 
patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.173580 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.174601 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.174656 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.174746 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.175740 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"65798658ef7013bf85ec95fa390973b92dfcdc72f2ecfa0156e34df0be1d1a43"} pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.175848 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" containerID="cri-o://65798658ef7013bf85ec95fa390973b92dfcdc72f2ecfa0156e34df0be1d1a43" gracePeriod=30 Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.189996 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.190075 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.190110 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.190337 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.190483 4125 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" start-of-body= Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.190567 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.282460 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.307520 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": read tcp 10.217.0.2:40954->10.217.0.23:8443: read: connection reset by peer" start-of-body= Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.307767 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": read tcp 10.217.0.2:40954->10.217.0.23:8443: read: connection reset by peer" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.546361 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/2.log" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.548160 4125 generic.go:334] "Generic (PLEG): container finished" podID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerID="a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904" exitCode=1 Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.548420 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerDied","Data":"a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904"} Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.548478 4125 scope.go:117] "RemoveContainer" containerID="2e4460ec3c54f3b8ff53d17aae59a9884a7b46f8168d7c7ea0ebcb59478846ce" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.549536 4125 scope.go:117] "RemoveContainer" containerID="a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904" Mar 12 13:40:00 crc kubenswrapper[4125]: E0312 13:40:00.550165 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-8b455464d-f9xdt_openshift-marketplace(3482be94-0cdb-4e2a-889b-e5fac59fdbf5)\"" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" 
podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.563882 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.644883 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.687040 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.891263 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:00 crc kubenswrapper[4125]: I0312 13:40:00.891499 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.015656 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-6sd5l" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.357450 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.576046 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/3.log" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.577572 4125 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="65798658ef7013bf85ec95fa390973b92dfcdc72f2ecfa0156e34df0be1d1a43" exitCode=255 Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.577654 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"65798658ef7013bf85ec95fa390973b92dfcdc72f2ecfa0156e34df0be1d1a43"} Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.577695 4125 scope.go:117] "RemoveContainer" containerID="228ac9185808ee44c5a4ed3c8b89e711041207001e85bfb9f027be017a3c4b12" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.581909 4125 generic.go:334] "Generic (PLEG): container finished" podID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerID="cfeb08a81fd6a8dca2eb53f6d7ae1bc9af487d431a775664a7d4cf86980cf131" exitCode=0 Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.582033 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" event={"ID":"4e18989a-5a3c-4b45-8821-4b91287eaf1e","Type":"ContainerDied","Data":"cfeb08a81fd6a8dca2eb53f6d7ae1bc9af487d431a775664a7d4cf86980cf131"} Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.582912 4125 scope.go:117] "RemoveContainer" 
containerID="cfeb08a81fd6a8dca2eb53f6d7ae1bc9af487d431a775664a7d4cf86980cf131" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.598296 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-5d9b995f6b-fcgd7_71af81a9-7d43-49b2-9287-c375900aa905/kube-scheduler-operator-container/1.log" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.598485 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerStarted","Data":"ef5f8f1f18fee9058fcce1db6edf32bd34486530eb09149694b2217baea305db"} Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.600708 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/2.log" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.731321 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.737680 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.871950 4125 kubelet.go:1922] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.889556 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:01 crc kubenswrapper[4125]: I0312 13:40:01.889679 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.175786 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.176367 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.610688 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/3.log" Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.612887 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a"} Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.613620 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.613704 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.617908 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.768517 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.886889 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:02 crc kubenswrapper[4125]: I0312 13:40:02.886999 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.181877 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.277214 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.278083 4125 scope.go:117] "RemoveContainer" containerID="a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904" Mar 12 13:40:03 crc kubenswrapper[4125]: E0312 13:40:03.279537 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-8b455464d-f9xdt_openshift-marketplace(3482be94-0cdb-4e2a-889b-e5fac59fdbf5)\"" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.362775 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.598255 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn 
container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.598663 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.640213 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-649bd778b4-tt5tw_45a8038e-e7f2-4d93-a6f5-7753aa54e63f/control-plane-machine-set-operator/0.log" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.641228 4125 generic.go:334] "Generic (PLEG): container finished" podID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" containerID="9e4c3b4d22fca7e9ce4866f1e217758bde1b5d74c62a8896e7c1f725f4a8f102" exitCode=1 Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.641412 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" event={"ID":"45a8038e-e7f2-4d93-a6f5-7753aa54e63f","Type":"ContainerDied","Data":"9e4c3b4d22fca7e9ce4866f1e217758bde1b5d74c62a8896e7c1f725f4a8f102"} Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.644520 4125 scope.go:117] "RemoveContainer" containerID="9e4c3b4d22fca7e9ce4866f1e217758bde1b5d74c62a8896e7c1f725f4a8f102" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.647785 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" event={"ID":"4e18989a-5a3c-4b45-8821-4b91287eaf1e","Type":"ContainerStarted","Data":"e7f4bd1311eab3293265200b4c1c614a72838739548ea0684385195613366dc3"} Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.647905 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.647991 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.848460 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.848588 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.908038 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:03 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:03 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:03 crc 
kubenswrapper[4125]: healthz check failed Mar 12 13:40:03 crc kubenswrapper[4125]: I0312 13:40:03.908112 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.086458 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.086551 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.086617 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.086636 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.667032 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-649bd778b4-tt5tw_45a8038e-e7f2-4d93-a6f5-7753aa54e63f/control-plane-machine-set-operator/0.log" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.668750 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.668789 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.669277 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" event={"ID":"45a8038e-e7f2-4d93-a6f5-7753aa54e63f","Type":"ContainerStarted","Data":"2b8d7d11bf77c1915d1943d3a76b30286a69d3218f83e28ff4e534447f1e7828"} Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.669943 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.671008 4125 patch_prober.go:28] interesting pod/controller-manager-7559d9b74c-lxhxw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get 
\"https://10.217.0.68:8443/healthz\": dial tcp 10.217.0.68:8443: connect: connection refused" start-of-body= Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.671451 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": dial tcp 10.217.0.68:8443: connect: connection refused" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.744417 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.825958 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.884385 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.904080 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:04 crc kubenswrapper[4125]: I0312 13:40:04.904251 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.010455 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.024699 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.025765 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.476988 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.669967 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.673631 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.674001 4125 patch_prober.go:28] interesting pod/controller-manager-7559d9b74c-lxhxw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.68:8443/healthz\": dial tcp 10.217.0.68:8443: connect: connection refused" start-of-body= Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.674485 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": dial tcp 10.217.0.68:8443: connect: connection refused" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.674095 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.674531 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.808028 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Mar 12 13:40:05 crc kubenswrapper[4125]: I0312 13:40:05.962031 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.046538 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.046623 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:06 crc 
kubenswrapper[4125]: I0312 13:40:06.174867 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.175003 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.278033 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.444993 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"webhook-serving-cert" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.674775 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.675265 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.681287 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.932313 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.932690 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.940164 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 
13:40:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:06 crc kubenswrapper[4125]: I0312 13:40:06.940359 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:07 crc kubenswrapper[4125]: I0312 13:40:07.605041 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:40:07 crc kubenswrapper[4125]: I0312 13:40:07.781712 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Mar 12 13:40:07 crc kubenswrapper[4125]: I0312 13:40:07.887441 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:07 crc kubenswrapper[4125]: I0312 13:40:07.888056 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:08 crc kubenswrapper[4125]: I0312 13:40:08.164644 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Mar 12 13:40:08 crc kubenswrapper[4125]: I0312 13:40:08.678071 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Mar 12 13:40:08 crc kubenswrapper[4125]: I0312 13:40:08.826673 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Mar 12 13:40:08 crc kubenswrapper[4125]: I0312 13:40:08.890875 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:08 crc kubenswrapper[4125]: I0312 13:40:08.891020 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:08 crc kubenswrapper[4125]: I0312 13:40:08.952628 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.032999 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.098585 4125 kubelet.go:2533] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.099692 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.099732 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.174594 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.174715 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.174783 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.174861 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.374901 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.702790 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.823060 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.847700 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.889702 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:09 crc kubenswrapper[4125]: healthz check failed 
Mar 12 13:40:09 crc kubenswrapper[4125]: I0312 13:40:09.889782 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.253721 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.267148 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.479398 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.633633 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.691913 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.730571 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.752746 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.890256 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.890651 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:10 crc kubenswrapper[4125]: I0312 13:40:10.947559 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.219771 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.310898 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.432377 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.474394 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.840445 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" event={"ID":"b61ce6b0-a70f-42b7-9435-3d6acba81ccf","Type":"ContainerStarted","Data":"d1ec317e62fc1d13842bf4827f5df1d5c0a5be989a45848fa20348d2a1aac0c1"} Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.891232 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:11 crc kubenswrapper[4125]: I0312 13:40:11.892582 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.174996 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.175333 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.176623 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.176659 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.176691 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.178404 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a"} pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.178469 4125 
kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" containerID="cri-o://5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a" gracePeriod=30 Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.220565 4125 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": read tcp 10.217.0.2:37492->10.217.0.23:8443: read: connection reset by peer" start-of-body= Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.225720 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": read tcp 10.217.0.2:37492->10.217.0.23:8443: read: connection reset by peer" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.424090 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:40:12 crc kubenswrapper[4125]: E0312 13:40:12.524939 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod530553aa_0a1d_423e_8a22_f5eb4bdbb883.slice/crio-5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.844543 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.873308 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/4.log" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.875150 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/3.log" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.880089 4125 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a" exitCode=255 Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.880254 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a"} Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.880369 4125 scope.go:117] "RemoveContainer" containerID="65798658ef7013bf85ec95fa390973b92dfcdc72f2ecfa0156e34df0be1d1a43" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.887316 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" 
start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.887374 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.888482 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" event={"ID":"b61ce6b0-a70f-42b7-9435-3d6acba81ccf","Type":"ContainerStarted","Data":"1b67f96870eed12ec75917fda181bfd818ce64944856ebc958a4044239519643"} Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.889016 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.891706 4125 patch_prober.go:28] interesting pod/oauth-openshift-6499cf79cf-qdfbh container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" start-of-body= Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.891767 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" Mar 12 13:40:12 crc kubenswrapper[4125]: I0312 13:40:12.931495 4125 reflector.go:351] Caches populated for *v1.Service from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.147025 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.162782 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-q786x" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.321322 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.576170 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-sv888" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.598916 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.598998 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.630287 4125 reflector.go:351] Caches 
populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Mar 12 13:40:13 crc kubenswrapper[4125]: E0312 13:40:13.665956 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.843635 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.847671 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.847794 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.886393 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.886511 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.898081 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/4.log" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.899513 4125 scope.go:117] "RemoveContainer" containerID="5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a" Mar 12 13:40:13 crc kubenswrapper[4125]: E0312 13:40:13.899993 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.899982 4125 patch_prober.go:28] interesting pod/oauth-openshift-6499cf79cf-qdfbh container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 
10.217.0.72:6443: connect: connection refused" start-of-body= Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.900042 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" Mar 12 13:40:13 crc kubenswrapper[4125]: I0312 13:40:13.981517 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.031359 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.086929 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.087046 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.087159 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.087362 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.087426 4125 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.091130 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="console-operator" containerStatusID={"Type":"cri-o","ID":"39ebdae230cdbbdee5d4d29f2b22052e045fb4e9bdb1ddc921c70774a4858df5"} pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" containerMessage="Container console-operator failed liveness probe, will be restarted" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.091505 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" containerID="cri-o://39ebdae230cdbbdee5d4d29f2b22052e045fb4e9bdb1ddc921c70774a4858df5" gracePeriod=30 Mar 12 13:40:14 crc 
kubenswrapper[4125]: I0312 13:40:14.165608 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": read tcp 10.217.0.2:44954->10.217.0.62:8443: read: connection reset by peer" start-of-body= Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.165676 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": read tcp 10.217.0.2:44954->10.217.0.62:8443: read: connection reset by peer" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.236014 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.245723 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.420969 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.840374 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.888593 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:14 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.889038 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.916072 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/2.log" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.918519 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/1.log" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.918969 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerDied","Data":"39ebdae230cdbbdee5d4d29f2b22052e045fb4e9bdb1ddc921c70774a4858df5"} Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.919023 4125 scope.go:117] "RemoveContainer" containerID="c6d10a00de1ac306e3b2905f7c86dca8567df9d41f149845495c032671f7caf6" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.919162 4125 generic.go:334] "Generic (PLEG): container finished" podID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" 
containerID="39ebdae230cdbbdee5d4d29f2b22052e045fb4e9bdb1ddc921c70774a4858df5" exitCode=255 Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.926369 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.928229 4125 generic.go:334] "Generic (PLEG): container finished" podID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerID="e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810" exitCode=0 Mar 12 13:40:14 crc kubenswrapper[4125]: I0312 13:40:14.928346 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerDied","Data":"e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810"} Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.026289 4125 scope.go:117] "RemoveContainer" containerID="a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904" Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.296521 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="48128e8d38b5cbcd2691da698bd9cac3" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.450322 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.726593 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.890129 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.890256 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:15 crc kubenswrapper[4125]: I0312 13:40:15.948537 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/2.log" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.159630 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.371097 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.886797 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:16 crc kubenswrapper[4125]: [-]has-synced 
failed: reason withheld Mar 12 13:40:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.886961 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.931944 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.932050 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.968525 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-f9xdt_3482be94-0cdb-4e2a-889b-e5fac59fdbf5/marketplace-operator/2.log" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.968672 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f"} Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.975654 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-5dbbc74dc9-cp5cd_e9127708-ccfd-4891-8a3a-f0cacb77e0f4/console-operator/2.log" Mar 12 13:40:16 crc kubenswrapper[4125]: I0312 13:40:16.975738 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"3bf133f9bcf7b769f28b1a75631153dd10fd0ed847750d83a8b21f24ee310174"} Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.396802 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.549799 4125 patch_prober.go:28] interesting pod/oauth-openshift-6499cf79cf-qdfbh container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" start-of-body= Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.549955 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.588081 4125 
Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.702672 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.895339 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:17 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:17 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:17 crc kubenswrapper[4125]: I0312 13:40:17.895429 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.006690 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-6499cf79cf-qdfbh_b61ce6b0-a70f-42b7-9435-3d6acba81ccf/oauth-openshift/0.log"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.006864 4125 generic.go:334] "Generic (PLEG): container finished" podID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" containerID="1b67f96870eed12ec75917fda181bfd818ce64944856ebc958a4044239519643" exitCode=255
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.007041 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" event={"ID":"b61ce6b0-a70f-42b7-9435-3d6acba81ccf","Type":"ContainerDied","Data":"1b67f96870eed12ec75917fda181bfd818ce64944856ebc958a4044239519643"}
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.007724 4125 scope.go:117] "RemoveContainer" containerID="1b67f96870eed12ec75917fda181bfd818ce64944856ebc958a4044239519643"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.013173 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerStarted","Data":"e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61"}
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.013900 4125 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.013981 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.014579 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.016581 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.016705 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.046446 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.059320 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.217721 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.300046 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.887387 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:18 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:18 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.887730 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:18 crc kubenswrapper[4125]: I0312 13:40:18.983618 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.031631 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-6499cf79cf-qdfbh_b61ce6b0-a70f-42b7-9435-3d6acba81ccf/oauth-openshift/0.log"
Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.032236 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" event={"ID":"b61ce6b0-a70f-42b7-9435-3d6acba81ccf","Type":"ContainerStarted","Data":"c9056e743df46ddcb39df2a742774a2d4d03a9e8d73ce3e3ef7dd5d68cfa1dff"}
Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.032979 4125 patch_prober.go:28] interesting pod/marketplace-operator-8b455464d-f9xdt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.033175 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused"
podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.13:8080/healthz\": dial tcp 10.217.0.13:8080: connect: connection refused" Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.076584 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.328953 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.454151 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.886610 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.887079 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:19 crc kubenswrapper[4125]: I0312 13:40:19.933096 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.077520 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-7874c8775-kh4j9_ec1bae8b-3200-4ad9-b33b-cf8701f3027c/machine-approver-controller/0.log" Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.078516 4125 generic.go:334] "Generic (PLEG): container finished" podID="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" containerID="e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03" exitCode=255 Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.078673 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerDied","Data":"e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03"} Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.080119 4125 scope.go:117] "RemoveContainer" containerID="e103325372d17ce38841fe582b644171d426ad19fa7b1a22060ba823f8148f03" Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.082851 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.296422 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-79vsd" Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.579517 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.885604 4125 patch_prober.go:28] interesting 
pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:20 crc kubenswrapper[4125]: I0312 13:40:20.885688 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.050725 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.098556 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-7874c8775-kh4j9_ec1bae8b-3200-4ad9-b33b-cf8701f3027c/machine-approver-controller/0.log" Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.102620 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"c58ce00b4066688db0435b3dea28a95b4d0a00bb0f172e2b38ac325d20a1900a"} Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.189028 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.315695 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.885646 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:21 crc kubenswrapper[4125]: I0312 13:40:21.886025 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:22 crc kubenswrapper[4125]: I0312 13:40:22.057427 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Mar 12 13:40:22 crc kubenswrapper[4125]: I0312 13:40:22.183666 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Mar 12 13:40:22 crc kubenswrapper[4125]: I0312 13:40:22.537668 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Mar 12 13:40:22 crc kubenswrapper[4125]: I0312 13:40:22.799300 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Mar 12 13:40:22 crc kubenswrapper[4125]: 
Mar 12 13:40:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:22 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:22 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:22 crc kubenswrapper[4125]: I0312 13:40:22.890730 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.033476 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.040571 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.086729 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.181772 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.284634 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.321266 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.321507 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.396732 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.598676 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body=
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.599015 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.620728 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.848536 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.848649 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.887622 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:23 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:23 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.887865 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:23 crc kubenswrapper[4125]: I0312 13:40:23.944305 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.218046 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.295190 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.307972 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-kpdvz"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.520052 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.679491 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.695568 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.752780 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.885602 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:24 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:24 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:24 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.885700 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:24 crc kubenswrapper[4125]: I0312 13:40:24.891272 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=<
probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:24 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:24 crc kubenswrapper[4125]: > Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.434178 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.649053 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.680595 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.725990 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.788037 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.802534 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.896605 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:25 crc kubenswrapper[4125]: I0312 13:40:25.896948 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 13:40:26.046316 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 13:40:26.057040 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 13:40:26.143696 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 13:40:26.683076 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 13:40:26.886503 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:26 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:26 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 
13:40:26.888611 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:26 crc kubenswrapper[4125]: I0312 13:40:26.953792 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.025894 4125 scope.go:117] "RemoveContainer" containerID="5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a" Mar 12 13:40:27 crc kubenswrapper[4125]: E0312 13:40:27.026379 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.529274 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.712514 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.725703 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.887258 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.887360 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:27 crc kubenswrapper[4125]: I0312 13:40:27.953464 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Mar 12 13:40:28 crc kubenswrapper[4125]: I0312 13:40:28.570642 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Mar 12 13:40:28 crc kubenswrapper[4125]: I0312 13:40:28.608100 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Mar 12 13:40:28 crc kubenswrapper[4125]: I0312 13:40:28.691033 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Mar 12 13:40:28 crc kubenswrapper[4125]: I0312 13:40:28.836538 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Mar 12 13:40:28 crc 
Mar 12 13:40:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:28 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:28 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:28 crc kubenswrapper[4125]: I0312 13:40:28.887312 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.192034 4125 generic.go:334] "Generic (PLEG): container finished" podID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerID="bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f" exitCode=0
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.192148 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerDied","Data":"bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f"}
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.206873 4125 generic.go:334] "Generic (PLEG): container finished" podID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerID="29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f" exitCode=0
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.206908 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerDied","Data":"29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f"}
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.230692 4125 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.296051 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.547475 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.712863 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.815663 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.887482 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:29 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:29 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:29 crc kubenswrapper[4125]: I0312 13:40:29.887628 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:30 crc kubenswrapper[4125]: I0312 13:40:30.056246 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Mar 12 13:40:30 crc kubenswrapper[4125]: I0312 13:40:30.472763 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Mar 12 13:40:30 crc kubenswrapper[4125]: I0312 13:40:30.885962 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:30 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:30 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:30 crc kubenswrapper[4125]: I0312 13:40:30.886056 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.223585 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8"}
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.227878 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerStarted","Data":"b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87"}
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.434359 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.434459 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.434503 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.434536 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" status="Running"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.434573 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.440951 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-twmwc"
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.886402 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:31 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:31 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:31 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:31 crc kubenswrapper[4125]: I0312 13:40:31.886515 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:32 crc kubenswrapper[4125]: I0312 13:40:32.610171 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:32.720115 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:32.828238 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:32.865006 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:32.887670 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:33 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:33 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:32.887748 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.542067 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.582987 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.596739 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.596939 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rmwfn"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.598359 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body=
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.598712 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.607344 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.607497 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.848077 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.848155 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.877093 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.895437 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:33 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:33 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:33 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:33 crc kubenswrapper[4125]: I0312 13:40:33.895510 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.119715 4125 reflector.go:351] Caches populated for *v1.CSIDriver from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.326866 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.509301 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:40:34 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:40:34 crc kubenswrapper[4125]: >
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.780586 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:40:34 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:40:34 crc kubenswrapper[4125]: >
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.798638 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:40:34 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:40:34 crc kubenswrapper[4125]: >
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.877088 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.886137 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:34 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:34 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:34 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:34 crc kubenswrapper[4125]: I0312 13:40:34.886520 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:35 crc kubenswrapper[4125]: I0312 13:40:35.359782 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Mar 12 13:40:35 crc kubenswrapper[4125]: I0312 13:40:35.366170 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Mar 12 13:40:35 crc kubenswrapper[4125]: I0312 13:40:35.870231 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Mar 12 13:40:35 crc kubenswrapper[4125]: I0312 13:40:35.891587 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:35 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:35 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:35 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:35 crc kubenswrapper[4125]: I0312 13:40:35.891703 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:35 crc kubenswrapper[4125]: I0312 13:40:35.974047 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Mar 12 13:40:36 crc kubenswrapper[4125]: I0312 13:40:36.195375 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Mar 12 13:40:36 crc kubenswrapper[4125]: I0312 13:40:36.535424 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Mar 12 13:40:36 crc kubenswrapper[4125]: I0312 13:40:36.888537 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:36 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:36 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:36 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:36 crc kubenswrapper[4125]: I0312 13:40:36.888645 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:37 crc kubenswrapper[4125]: I0312 13:40:37.711014 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Mar 12 13:40:37 crc kubenswrapper[4125]: I0312 13:40:37.769112 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Mar 12 13:40:37 crc kubenswrapper[4125]: I0312 13:40:37.891493 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:37 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:37 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:37 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:37 crc kubenswrapper[4125]: I0312 13:40:37.891600 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:37 crc kubenswrapper[4125]: I0312 13:40:37.917781 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Mar 12 13:40:38 crc kubenswrapper[4125]: I0312 13:40:38.887527 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:40:38 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:40:38 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:40:38 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:40:38 crc kubenswrapper[4125]: I0312 13:40:38.887903 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:40:39 crc kubenswrapper[4125]: I0312 13:40:39.026070 4125 scope.go:117] "RemoveContainer" containerID="5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a"
Mar 12 13:40:39 crc kubenswrapper[4125]: E0312 13:40:39.026613 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 12 13:40:39 crc kubenswrapper[4125]: I0312 13:40:39.214974 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Mar 12 13:40:39 crc kubenswrapper[4125]: I0312 13:40:39.363804 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
object-"openshift-console"/"console-config" Mar 12 13:40:39 crc kubenswrapper[4125]: I0312 13:40:39.835713 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Mar 12 13:40:39 crc kubenswrapper[4125]: I0312 13:40:39.891032 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:39 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:39 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:39 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:39 crc kubenswrapper[4125]: I0312 13:40:39.891281 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.294687 4125 generic.go:334] "Generic (PLEG): container finished" podID="bb917686-edfb-4158-86ad-6fce0abec64c" containerID="740bf76161fec26f5a24bce398a3932f4dddc3105a1392e7362ce9200176a764" exitCode=0 Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.295117 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerDied","Data":"740bf76161fec26f5a24bce398a3932f4dddc3105a1392e7362ce9200176a764"} Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.325164 4125 generic.go:334] "Generic (PLEG): container finished" podID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerID="25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7" exitCode=0 Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.325454 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerDied","Data":"25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7"} Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.342270 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.718089 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-dwn4s" Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.898739 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:40 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:40 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:40 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:40 crc kubenswrapper[4125]: I0312 13:40:40.898918 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:41 crc kubenswrapper[4125]: I0312 13:40:41.085437 4125 reflector.go:351] Caches populated for 
*v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Mar 12 13:40:41 crc kubenswrapper[4125]: I0312 13:40:41.238366 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Mar 12 13:40:41 crc kubenswrapper[4125]: I0312 13:40:41.320913 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Mar 12 13:40:41 crc kubenswrapper[4125]: I0312 13:40:41.891900 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:41 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:41 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:41 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:41 crc kubenswrapper[4125]: I0312 13:40:41.892088 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:42 crc kubenswrapper[4125]: I0312 13:40:42.349325 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerStarted","Data":"73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48"} Mar 12 13:40:42 crc kubenswrapper[4125]: I0312 13:40:42.357992 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerStarted","Data":"20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d"} Mar 12 13:40:42 crc kubenswrapper[4125]: I0312 13:40:42.638537 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Mar 12 13:40:42 crc kubenswrapper[4125]: I0312 13:40:42.885701 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:42 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:42 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:42 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:42 crc kubenswrapper[4125]: I0312 13:40:42.886452 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.174009 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.174105 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.233006 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 
13:40:43.534973 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.598074 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.598166 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.705967 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.847738 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.847917 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.869694 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.886600 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:43 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:43 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:43 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:43 crc kubenswrapper[4125]: I0312 13:40:43.886695 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.042751 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.419435 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:44 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:44 crc kubenswrapper[4125]: > Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.505718 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" 
containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:44 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:44 crc kubenswrapper[4125]: > Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.810275 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.814118 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:44 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:44 crc kubenswrapper[4125]: > Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.837307 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:44 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:44 crc kubenswrapper[4125]: > Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.887650 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:44 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:44 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:44 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:44 crc kubenswrapper[4125]: I0312 13:40:44.888914 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.045302 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.045364 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.184446 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.551631 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.591742 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.667619 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.888424 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:45 crc kubenswrapper[4125]: [-]has-synced failed: 
reason withheld Mar 12 13:40:45 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:45 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:45 crc kubenswrapper[4125]: I0312 13:40:45.889892 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:46 crc kubenswrapper[4125]: I0312 13:40:46.073462 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 12 13:40:46 crc kubenswrapper[4125]: I0312 13:40:46.217355 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:46 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:46 crc kubenswrapper[4125]: > Mar 12 13:40:46 crc kubenswrapper[4125]: I0312 13:40:46.680453 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Mar 12 13:40:46 crc kubenswrapper[4125]: I0312 13:40:46.893291 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:46 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:46 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:46 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:46 crc kubenswrapper[4125]: I0312 13:40:46.893474 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.252557 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.289027 4125 reflector.go:351] Caches populated for *v1.Node from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.610571 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.887145 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:47 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:47 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.887344 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.892913 4125 reflector.go:351] Caches populated for 
*v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Mar 12 13:40:47 crc kubenswrapper[4125]: I0312 13:40:47.966147 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Mar 12 13:40:48 crc kubenswrapper[4125]: I0312 13:40:48.515909 4125 reflector.go:351] Caches populated for *v1.RuntimeClass from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:40:48 crc kubenswrapper[4125]: I0312 13:40:48.886445 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:48 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:48 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:48 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:48 crc kubenswrapper[4125]: I0312 13:40:48.886991 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.410750 4125 generic.go:334] "Generic (PLEG): container finished" podID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerID="012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc" exitCode=0 Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.410876 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerDied","Data":"012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc"} Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.645573 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.702064 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.887671 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:49 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:49 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:49 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.887766 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:49 crc kubenswrapper[4125]: I0312 13:40:49.987785 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-58g82" Mar 12 13:40:50 crc kubenswrapper[4125]: I0312 13:40:50.888337 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason 
withheld Mar 12 13:40:50 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:50 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:50 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:50 crc kubenswrapper[4125]: I0312 13:40:50.888884 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.027156 4125 scope.go:117] "RemoveContainer" containerID="5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a" Mar 12 13:40:51 crc kubenswrapper[4125]: E0312 13:40:51.027987 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openshift-config-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=openshift-config-operator pod=openshift-config-operator-77658b5b66-dq5sc_openshift-config-operator(530553aa-0a1d-423e-8a22-f5eb4bdbb883)\"" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.429323 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerStarted","Data":"04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306"} Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.439013 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.707871 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.886246 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:51 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:51 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:51 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.886376 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.931191 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-ng44q" Mar 12 13:40:51 crc kubenswrapper[4125]: I0312 13:40:51.983277 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Mar 12 13:40:52 crc kubenswrapper[4125]: I0312 13:40:52.192996 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Mar 12 13:40:52 crc kubenswrapper[4125]: I0312 13:40:52.886135 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" 
start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:52 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:52 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:52 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:52 crc kubenswrapper[4125]: I0312 13:40:52.886285 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.337469 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.339087 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.597969 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.598045 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.847511 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.847604 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.887128 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:53 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:53 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:53 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.887504 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.923759 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.955527 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Mar 12 
13:40:53 crc kubenswrapper[4125]: I0312 13:40:53.983779 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.160779 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.257185 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.306893 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.602319 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:54 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:54 crc kubenswrapper[4125]: > Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.639533 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.765032 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:54 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:54 crc kubenswrapper[4125]: > Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.823525 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:54 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:54 crc kubenswrapper[4125]: > Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.884956 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:54 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:54 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:54 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:54 crc kubenswrapper[4125]: I0312 13:40:54.885070 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:55 crc kubenswrapper[4125]: I0312 13:40:55.886422 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:55 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:55 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:55 crc kubenswrapper[4125]: 
healthz check failed Mar 12 13:40:55 crc kubenswrapper[4125]: I0312 13:40:55.887475 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:56 crc kubenswrapper[4125]: I0312 13:40:56.263902 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="registry-server" probeResult="failure" output=< Mar 12 13:40:56 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:40:56 crc kubenswrapper[4125]: > Mar 12 13:40:56 crc kubenswrapper[4125]: I0312 13:40:56.886489 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:56 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:56 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:56 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:56 crc kubenswrapper[4125]: I0312 13:40:56.887516 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:57 crc kubenswrapper[4125]: I0312 13:40:57.501264 4125 generic.go:334] "Generic (PLEG): container finished" podID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerID="ae54151489cfc34992d37ed0b8ac57e695a64170c18915ba01de6399209ce42d" exitCode=0 Mar 12 13:40:57 crc kubenswrapper[4125]: I0312 13:40:57.501352 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerDied","Data":"ae54151489cfc34992d37ed0b8ac57e695a64170c18915ba01de6399209ce42d"} Mar 12 13:40:57 crc kubenswrapper[4125]: I0312 13:40:57.672598 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Mar 12 13:40:57 crc kubenswrapper[4125]: I0312 13:40:57.889735 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:57 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:57 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:57 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:57 crc kubenswrapper[4125]: I0312 13:40:57.889920 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:58 crc kubenswrapper[4125]: I0312 13:40:58.169101 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Mar 12 13:40:58 crc kubenswrapper[4125]: I0312 13:40:58.869893 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Mar 12 13:40:58 crc 
kubenswrapper[4125]: I0312 13:40:58.890392 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:58 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:58 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:58 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:58 crc kubenswrapper[4125]: I0312 13:40:58.890533 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:59 crc kubenswrapper[4125]: I0312 13:40:59.729910 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Mar 12 13:40:59 crc kubenswrapper[4125]: I0312 13:40:59.888228 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:40:59 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:40:59 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:40:59 crc kubenswrapper[4125]: healthz check failed Mar 12 13:40:59 crc kubenswrapper[4125]: I0312 13:40:59.888663 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:40:59 crc kubenswrapper[4125]: I0312 13:40:59.892859 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Mar 12 13:41:00 crc kubenswrapper[4125]: I0312 13:41:00.362035 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Mar 12 13:41:00 crc kubenswrapper[4125]: I0312 13:41:00.513458 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Mar 12 13:41:00 crc kubenswrapper[4125]: I0312 13:41:00.558973 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerStarted","Data":"804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9"} Mar 12 13:41:00 crc kubenswrapper[4125]: I0312 13:41:00.886269 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:00 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:00 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:00 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:00 crc kubenswrapper[4125]: I0312 13:41:00.887283 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:01 crc 
kubenswrapper[4125]: I0312 13:41:01.889546 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:01 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:01 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:01 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:01 crc kubenswrapper[4125]: I0312 13:41:01.889635 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:02 crc kubenswrapper[4125]: I0312 13:41:02.887311 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:02 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:02 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:02 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:02 crc kubenswrapper[4125]: I0312 13:41:02.887417 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:03 crc kubenswrapper[4125]: I0312 13:41:03.372766 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:41:03 crc kubenswrapper[4125]: I0312 13:41:03.492467 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Mar 12 13:41:03 crc kubenswrapper[4125]: I0312 13:41:03.527039 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7287f" Mar 12 13:41:03 crc kubenswrapper[4125]: I0312 13:41:03.598546 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:41:03 crc kubenswrapper[4125]: I0312 13:41:03.598640 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:41:03 crc kubenswrapper[4125]: I0312 13:41:03.848384 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:03.848708 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: 
connection refused" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:03.848752 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:03.880044 4125 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="console" containerStatusID={"Type":"cri-o","ID":"9c5d3925adb66fb4aa984ebb038669e1253dd1ae4d86122b5b7a97e70cd77667"} pod="openshift-console/console-84fccc7b6-mkncc" containerMessage="Container console failed startup probe, will be restarted" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:03.887422 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:04 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:04 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:03.887518 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.191170 4125 reflector.go:351] Caches populated for *v1.Pod from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.364488 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rmwfn" podStartSLOduration=53827398.14091935 podStartE2EDuration="14952h7m30.357890481s" podCreationTimestamp="2024-06-27 13:33:34 +0000 UTC" firstStartedPulling="2026-03-12 13:36:17.379103958 +0000 UTC m=+947.702489837" lastFinishedPulling="2026-03-12 13:40:29.596075108 +0000 UTC m=+1199.919460967" observedRunningTime="2026-03-12 13:40:31.42433292 +0000 UTC m=+1201.747719119" watchObservedRunningTime="2026-03-12 13:41:04.357890481 +0000 UTC m=+1234.681276290" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.365309 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=184.365273601 podStartE2EDuration="3m4.365273601s" podCreationTimestamp="2026-03-12 13:38:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:40:14.936253167 +0000 UTC m=+1185.259639596" watchObservedRunningTime="2026-03-12 13:41:04.365273601 +0000 UTC m=+1234.688659740" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.365499 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k9qqb" podStartSLOduration=53827266.581859045 podStartE2EDuration="14952h6m48.365483177s" podCreationTimestamp="2024-06-27 13:34:16 +0000 UTC" firstStartedPulling="2026-03-12 13:35:16.41014527 +0000 UTC m=+886.733531139" lastFinishedPulling="2026-03-12 13:40:58.193768991 +0000 UTC m=+1228.517155270" observedRunningTime="2026-03-12 13:41:00.628451057 +0000 UTC m=+1230.951837056" watchObservedRunningTime="2026-03-12 13:41:04.365483177 +0000 UTC m=+1234.688869056" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.366121 
4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podStartSLOduration=225.366102456 podStartE2EDuration="3m45.366102456s" podCreationTimestamp="2026-03-12 13:37:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:40:21.579526568 +0000 UTC m=+1191.902912507" watchObservedRunningTime="2026-03-12 13:41:04.366102456 +0000 UTC m=+1234.689488225" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.367010 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g4v97" podStartSLOduration=53827282.096119344 podStartE2EDuration="14952h6m46.366986473s" podCreationTimestamp="2024-06-27 13:34:18 +0000 UTC" firstStartedPulling="2026-03-12 13:35:16.403297469 +0000 UTC m=+886.726683418" lastFinishedPulling="2026-03-12 13:40:40.674164526 +0000 UTC m=+1210.997550545" observedRunningTime="2026-03-12 13:40:42.631059899 +0000 UTC m=+1212.954446008" watchObservedRunningTime="2026-03-12 13:41:04.366986473 +0000 UTC m=+1234.690372382" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.367973 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=186.367951203 podStartE2EDuration="3m6.367951203s" podCreationTimestamp="2026-03-12 13:37:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:39:27.080905644 +0000 UTC m=+1137.404291573" watchObservedRunningTime="2026-03-12 13:41:04.367951203 +0000 UTC m=+1234.691337202" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.375909 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-image-registry/image-registry-7dc8587b5-4h2pb","openshift-kube-apiserver/installer-9-crc","openshift-apiserver/apiserver-67cbf64bc9-fq4m9","openshift-image-registry/image-registry-86594ff457-6b77x"] Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.376034 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-11-crc","openshift-apiserver/apiserver-6cdf967d79-ffdf8","openshift-kube-apiserver/kube-apiserver-crc","openshift-multus/cni-sysctl-allowlist-ds-rjfwq"] Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.377533 4125 topology_manager.go:215] "Topology Admit Handler" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" podNamespace="openshift-apiserver" podName="apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.377552 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.379342 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="09143b32-bfcb-4682-a82f-e0bfa420e445" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386231 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="35c093da-a468-44a1-8ff0-09b09268828c" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386349 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c093da-a468-44a1-8ff0-09b09268828c" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: 
E0312 13:41:04.386402 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386411 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386432 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" containerName="pruner" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386440 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" containerName="pruner" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386454 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver-check-endpoints" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386462 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver-check-endpoints" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386482 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386489 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386501 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerName="registry" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386508 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerName="registry" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386521 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="fix-audit-permissions" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386528 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="fix-audit-permissions" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386543 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" containerName="registry" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386550 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" containerName="registry" Mar 12 13:41:04 crc kubenswrapper[4125]: E0312 13:41:04.386563 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.386570 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390436 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390499 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" containerName="installer" Mar 12 13:41:04 crc 
kubenswrapper[4125]: I0312 13:41:04.390516 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" containerName="registry" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390530 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" containerName="openshift-apiserver-check-endpoints" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390541 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" containerName="registry" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390554 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" containerName="pruner" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390564 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.390573 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c093da-a468-44a1-8ff0-09b09268828c" containerName="installer" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.407527 4125 topology_manager.go:215] "Topology Admit Handler" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" podNamespace="openshift-multus" podName="cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.408231 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.411767 4125 topology_manager.go:215] "Topology Admit Handler" podUID="2bbbb77a-fabb-4250-a075-38a7c2a82752" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-11-crc" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.411898 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.414274 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"] Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.414305 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9","openshift-controller-manager/controller-manager-7559d9b74c-lxhxw"] Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.414583 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" containerID="cri-o://e7f4bd1311eab3293265200b4c1c614a72838739548ea0684385195613366dc3" gracePeriod=30 Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.415862 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-11-crc" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.416418 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.416593 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.416703 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.417060 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" containerID="cri-o://00fc1a49be6f6ce49f26a0d690991ef4f410f9799447b97d80d35ade0960dd3a" gracePeriod=30 Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.417458 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-r9fjc" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.429596 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.429651 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.429716 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.429667 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.429953 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.430040 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-dl9g2" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.430040 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-smth4" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.432628 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.433001 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.438399 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-sysctl-allowlist" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.445428 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.467488 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:04 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:04 crc 
kubenswrapper[4125]: > Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.469025 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:04 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:04 crc kubenswrapper[4125]: > Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523301 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/49b2f272-4389-4573-ac63-135cbe6ca129-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523430 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2bbbb77a-fabb-4250-a075-38a7c2a82752-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " pod="openshift-kube-controller-manager/revision-pruner-11-crc" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523463 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523491 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523515 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523566 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523593 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit-dir\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523761 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523799 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b2f272-4389-4573-ac63-135cbe6ca129-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523880 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2bbbb77a-fabb-4250-a075-38a7c2a82752-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " pod="openshift-kube-controller-manager/revision-pruner-11-crc" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523942 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-node-pullsecrets\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.523997 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.524034 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.532286 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmnd4\" (UniqueName: \"kubernetes.io/projected/49b2f272-4389-4573-ac63-135cbe6ca129-kube-api-access-kmnd4\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.532561 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/49b2f272-4389-4573-ac63-135cbe6ca129-ready\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.532705 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" 
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.532895 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.610283 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=62.610242534 podStartE2EDuration="1m2.610242534s" podCreationTimestamp="2026-03-12 13:40:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:04.606631053 +0000 UTC m=+1234.930017012" watchObservedRunningTime="2026-03-12 13:41:04.610242534 +0000 UTC m=+1234.933628643"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/49b2f272-4389-4573-ac63-135cbe6ca129-ready\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634262 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634292 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634316 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/49b2f272-4389-4573-ac63-135cbe6ca129-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634342 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2bbbb77a-fabb-4250-a075-38a7c2a82752-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " pod="openshift-kube-controller-manager/revision-pruner-11-crc"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634365 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634385 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634410 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634486 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634510 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit-dir\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634541 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634564 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b2f272-4389-4573-ac63-135cbe6ca129-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634589 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2bbbb77a-fabb-4250-a075-38a7c2a82752-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " pod="openshift-kube-controller-manager/revision-pruner-11-crc"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634616 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-node-pullsecrets\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634640 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634664 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.634691 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kmnd4\" (UniqueName: \"kubernetes.io/projected/49b2f272-4389-4573-ac63-135cbe6ca129-kube-api-access-kmnd4\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.642121 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b2f272-4389-4573-ac63-135cbe6ca129-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.643348 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-node-pullsecrets\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.643372 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.643428 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit-dir\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.643994 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.645605 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.662703 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.663230 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/49b2f272-4389-4573-ac63-135cbe6ca129-ready\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.663773 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.663878 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2bbbb77a-fabb-4250-a075-38a7c2a82752-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " pod="openshift-kube-controller-manager/revision-pruner-11-crc"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.682985 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/49b2f272-4389-4573-ac63-135cbe6ca129-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.683120 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.693546 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.720360 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.752125 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.767797 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmnd4\" (UniqueName: \"kubernetes.io/projected/49b2f272-4389-4573-ac63-135cbe6ca129-kube-api-access-kmnd4\") pod \"cni-sysctl-allowlist-ds-rjfwq\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.771394 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2bbbb77a-fabb-4250-a075-38a7c2a82752-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " pod="openshift-kube-controller-manager/revision-pruner-11-crc"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.816227 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.823338 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq"
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.823483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-11-crc"
Mar 12 13:41:04 crc kubenswrapper[4125]: W0312 13:41:04.887037 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49b2f272_4389_4573_ac63_135cbe6ca129.slice/crio-0485f9547c53ead05fa83e3b5da8528c100af5fef0ed49d3aae3df93d2c253b4 WatchSource:0}: Error finding container 0485f9547c53ead05fa83e3b5da8528c100af5fef0ed49d3aae3df93d2c253b4: Status 404 returned error can't find the container with id 0485f9547c53ead05fa83e3b5da8528c100af5fef0ed49d3aae3df93d2c253b4
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.888391 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 12 13:41:04 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld
Mar 12 13:41:04 crc kubenswrapper[4125]: [+]process-running ok
Mar 12 13:41:04 crc kubenswrapper[4125]: healthz check failed
Mar 12 13:41:04 crc kubenswrapper[4125]: I0312 13:41:04.888452 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.020185 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k9qqb"
Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.020325 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k9qqb"
Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.025732 4125 scope.go:117] "RemoveContainer" containerID="5490469e11bce594865a7f571da442158db21ef700ab8b26f2069d8f93c40e7a"
Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.323082 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g4v97"
Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.550400 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g4v97"
Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.593781 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" event={"ID":"49b2f272-4389-4573-ac63-135cbe6ca129","Type":"ContainerStarted","Data":"0485f9547c53ead05fa83e3b5da8528c100af5fef0ed49d3aae3df93d2c253b4"}
container finished" podID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerID="00fc1a49be6f6ce49f26a0d690991ef4f410f9799447b97d80d35ade0960dd3a" exitCode=0 Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.598992 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" event={"ID":"dd2f98e4-4656-4c95-8c6f-5959bd9f876a","Type":"ContainerDied","Data":"00fc1a49be6f6ce49f26a0d690991ef4f410f9799447b97d80d35ade0960dd3a"} Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.599042 4125 scope.go:117] "RemoveContainer" containerID="e5e4ae2e28c6d0c65895c9e36ea602043bbd8bda8e4978a7f740eb0d6f142453" Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.614743 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-77658b5b66-dq5sc_530553aa-0a1d-423e-8a22-f5eb4bdbb883/openshift-config-operator/4.log" Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.616260 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"38f84c11e4b6500a278442292012fe106bdbb997347a1dd098cc4179275397d5"} Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.617715 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.627424 4125 generic.go:334] "Generic (PLEG): container finished" podID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerID="e7f4bd1311eab3293265200b4c1c614a72838739548ea0684385195613366dc3" exitCode=0 Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.627783 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" event={"ID":"4e18989a-5a3c-4b45-8821-4b91287eaf1e","Type":"ContainerDied","Data":"e7f4bd1311eab3293265200b4c1c614a72838739548ea0684385195613366dc3"} Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.742288 4125 scope.go:117] "RemoveContainer" containerID="cfeb08a81fd6a8dca2eb53f6d7ae1bc9af487d431a775664a7d4cf86980cf131" Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.888491 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:05 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:05 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:05 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:05 crc kubenswrapper[4125]: I0312 13:41:05.888599 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.039625 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3178c6ca-b9b2-446c-990f-8bf4a9f01b96" path="/var/lib/kubelet/pods/3178c6ca-b9b2-446c-990f-8bf4a9f01b96/volumes" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.045784 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35c093da-a468-44a1-8ff0-09b09268828c" 
path="/var/lib/kubelet/pods/35c093da-a468-44a1-8ff0-09b09268828c/volumes" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.051894 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d14510a-ac3d-4029-ae28-538bb2e94e32" path="/var/lib/kubelet/pods/8d14510a-ac3d-4029-ae28-538bb2e94e32/volumes" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.054443 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd34258c-0a6c-44b0-ba64-b411ac6bad46" path="/var/lib/kubelet/pods/fd34258c-0a6c-44b0-ba64-b411ac6bad46/volumes" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.081236 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.105017 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.125743 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251108 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251276 4125 topology_manager.go:215] "Topology Admit Handler" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: E0312 13:41:06.251542 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251560 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: E0312 13:41:06.251578 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251586 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: E0312 13:41:06.251599 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251606 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: E0312 13:41:06.251617 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251624 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251746 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" Mar 12 13:41:06 crc 
kubenswrapper[4125]: I0312 13:41:06.251761 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251771 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.251783 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" containerName="controller-manager" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.253474 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.259654 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.259857 4125 topology_manager.go:215] "Topology Admit Handler" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" podNamespace="openshift-controller-manager" podName="controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.260731 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.275312 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:06 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:06 crc kubenswrapper[4125]: > Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.283007 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e18989a-5a3c-4b45-8821-4b91287eaf1e-serving-cert\") pod \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.283439 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-client-ca\") pod \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.283595 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvxcs\" (UniqueName: \"kubernetes.io/projected/4e18989a-5a3c-4b45-8821-4b91287eaf1e-kube-api-access-fvxcs\") pod \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.283714 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-serving-cert\") pod \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.283934 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-client-ca\") pod 
\"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.284514 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58mvf\" (UniqueName: \"kubernetes.io/projected/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-kube-api-access-58mvf\") pod \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.284656 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-config\") pod \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\" (UID: \"dd2f98e4-4656-4c95-8c6f-5959bd9f876a\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.284770 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-config\") pod \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.286554 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-proxy-ca-bundles\") pod \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\" (UID: \"4e18989a-5a3c-4b45-8821-4b91287eaf1e\") " Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.286758 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.286922 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-client-ca\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287057 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-config\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287442 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzln9\" (UniqueName: \"kubernetes.io/projected/017a4afc-9c30-4dc3-974a-05c3d1384017-kube-api-access-rzln9\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287482 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287519 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287548 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287570 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/017a4afc-9c30-4dc3-974a-05c3d1384017-serving-cert\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.287599 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.286562 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-client-ca" (OuterVolumeSpecName: "client-ca") pod "4e18989a-5a3c-4b45-8821-4b91287eaf1e" (UID: "4e18989a-5a3c-4b45-8821-4b91287eaf1e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.288676 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-client-ca" (OuterVolumeSpecName: "client-ca") pod "dd2f98e4-4656-4c95-8c6f-5959bd9f876a" (UID: "dd2f98e4-4656-4c95-8c6f-5959bd9f876a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.298145 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4e18989a-5a3c-4b45-8821-4b91287eaf1e" (UID: "4e18989a-5a3c-4b45-8821-4b91287eaf1e"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.301010 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-config" (OuterVolumeSpecName: "config") pod "dd2f98e4-4656-4c95-8c6f-5959bd9f876a" (UID: "dd2f98e4-4656-4c95-8c6f-5959bd9f876a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.301586 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-config" (OuterVolumeSpecName: "config") pod "4e18989a-5a3c-4b45-8821-4b91287eaf1e" (UID: "4e18989a-5a3c-4b45-8821-4b91287eaf1e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.310501 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e18989a-5a3c-4b45-8821-4b91287eaf1e-kube-api-access-fvxcs" (OuterVolumeSpecName: "kube-api-access-fvxcs") pod "4e18989a-5a3c-4b45-8821-4b91287eaf1e" (UID: "4e18989a-5a3c-4b45-8821-4b91287eaf1e"). InnerVolumeSpecName "kube-api-access-fvxcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.317693 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.318338 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e18989a-5a3c-4b45-8821-4b91287eaf1e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4e18989a-5a3c-4b45-8821-4b91287eaf1e" (UID: "4e18989a-5a3c-4b45-8821-4b91287eaf1e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.319340 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dd2f98e4-4656-4c95-8c6f-5959bd9f876a" (UID: "dd2f98e4-4656-4c95-8c6f-5959bd9f876a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.319613 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-kube-api-access-58mvf" (OuterVolumeSpecName: "kube-api-access-58mvf") pod "dd2f98e4-4656-4c95-8c6f-5959bd9f876a" (UID: "dd2f98e4-4656-4c95-8c6f-5959bd9f876a"). InnerVolumeSpecName "kube-api-access-58mvf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.390666 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rzln9\" (UniqueName: \"kubernetes.io/projected/017a4afc-9c30-4dc3-974a-05c3d1384017-kube-api-access-rzln9\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.390751 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.390803 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.390920 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/017a4afc-9c30-4dc3-974a-05c3d1384017-serving-cert\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.390945 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.390976 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391007 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391028 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-client-ca\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391071 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-config\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391120 4125 reconciler_common.go:300] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391133 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e18989a-5a3c-4b45-8821-4b91287eaf1e-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391147 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391162 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-fvxcs\" (UniqueName: \"kubernetes.io/projected/4e18989a-5a3c-4b45-8821-4b91287eaf1e-kube-api-access-fvxcs\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391173 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391183 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391197 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-58mvf\" (UniqueName: \"kubernetes.io/projected/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-kube-api-access-58mvf\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391249 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd2f98e4-4656-4c95-8c6f-5959bd9f876a-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.391262 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e18989a-5a3c-4b45-8821-4b91287eaf1e-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.392758 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.396958 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 
crc kubenswrapper[4125]: I0312 13:41:06.398747 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.399791 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-config\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.400265 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-client-ca\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.408073 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.408077 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/017a4afc-9c30-4dc3-974a-05c3d1384017-serving-cert\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.424752 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.436024 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.440605 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzln9\" (UniqueName: \"kubernetes.io/projected/017a4afc-9c30-4dc3-974a-05c3d1384017-kube-api-access-rzln9\") pod \"route-controller-manager-5fccd57d48-6m9cg\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.492122 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-11-crc"] Mar 12 13:41:06 crc kubenswrapper[4125]: W0312 13:41:06.505465 4125 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-pod2bbbb77a_fabb_4250_a075_38a7c2a82752.slice/crio-1ba10a737a7f5e7a08066769e6e95aa5ad7b89a7a5bcc48c193dbd15f1da822e WatchSource:0}: Error finding container 1ba10a737a7f5e7a08066769e6e95aa5ad7b89a7a5bcc48c193dbd15f1da822e: Status 404 returned error can't find the container with id 1ba10a737a7f5e7a08066769e6e95aa5ad7b89a7a5bcc48c193dbd15f1da822e Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.506458 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-6cdf967d79-ffdf8"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.603573 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.674707 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.684693 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" event={"ID":"4e18989a-5a3c-4b45-8821-4b91287eaf1e","Type":"ContainerDied","Data":"f23e82f3c0b766599813e9145ee071e0b4440155ed5f6c21aaf40d5562190d4e"} Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.684757 4125 scope.go:117] "RemoveContainer" containerID="e7f4bd1311eab3293265200b4c1c614a72838739548ea0684385195613366dc3" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.684933 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7559d9b74c-lxhxw" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.724685 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" event={"ID":"49b2f272-4389-4573-ac63-135cbe6ca129","Type":"ContainerStarted","Data":"f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924"} Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.725644 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.755146 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-11-crc" event={"ID":"2bbbb77a-fabb-4250-a075-38a7c2a82752","Type":"ContainerStarted","Data":"1ba10a737a7f5e7a08066769e6e95aa5ad7b89a7a5bcc48c193dbd15f1da822e"} Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.767340 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" event={"ID":"dd2f98e4-4656-4c95-8c6f-5959bd9f876a","Type":"ContainerDied","Data":"9a4668c1cadfef1e1cfb5c60fd3d67f78983fa21555193a1cb4c00a4ae2f6ecc"} Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.767443 4125 util.go:48] "No ready sandbox for pod can be found. 
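
The "SyncLoop (PLEG)" entries interleaved above carry a JSON-rendered pod lifecycle event (ID, Type, Data, where Data is a container or sandbox ID). Decoding that payload is enough to reconstruct the ContainerStarted/ContainerDied sequence for each pod (illustrative Python, assuming the single-line format shown in this log):

    import json
    import re

    # Illustrative: pull (pod, event type, container/sandbox id) from
    # 'SyncLoop (PLEG): event for pod' lines; the event={...} payload is
    # plain JSON with no nested braces, so json.loads handles it directly.
    PLEG = re.compile(
        r'"SyncLoop \(PLEG\): event for pod" pod="(?P<pod>[^"]+)" '
        r'event=(?P<event>\{.*?\})'
    )

    def pleg_events(lines):
        for line in lines:
            for m in PLEG.finditer(line):
                ev = json.loads(m.group("event"))
                yield m.group("pod"), ev["Type"], ev["Data"]
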
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.799320 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerStarted","Data":"4d75b74c1f3e39851e73c68cbc74c473015b90f2e8a65be53f85e43032c8bc86"} Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.812468 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7559d9b74c-lxhxw"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.835231 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7559d9b74c-lxhxw"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.901957 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:06 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:06 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:06 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.902065 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.914310 4125 scope.go:117] "RemoveContainer" containerID="00fc1a49be6f6ce49f26a0d690991ef4f410f9799447b97d80d35ade0960dd3a" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.932802 4125 patch_prober.go:28] interesting pod/route-controller-manager-584c5db66f-kcmc9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: i/o timeout" start-of-body= Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.932927 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: i/o timeout" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.939496 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" podStartSLOduration=54.939446901 podStartE2EDuration="54.939446901s" podCreationTimestamp="2026-03-12 13:40:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:06.870565182 +0000 UTC m=+1237.193951111" watchObservedRunningTime="2026-03-12 13:41:06.939446901 +0000 UTC m=+1237.262832800" Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.979459 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9"] Mar 12 13:41:06 crc kubenswrapper[4125]: I0312 13:41:06.996700 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-584c5db66f-kcmc9"] Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.105003 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.381384 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rjfwq"] Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.816951 4125 generic.go:334] "Generic (PLEG): container finished" podID="bab054c9-6c83-40ee-896d-6459b22a6b4b" containerID="42f1002fce2a70ef17eb609ed6c519fbcde59c3e30f5f6539607b0e8fe98213e" exitCode=0 Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.817798 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerDied","Data":"42f1002fce2a70ef17eb609ed6c519fbcde59c3e30f5f6539607b0e8fe98213e"} Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.887044 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:07 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:07 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:07 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.887415 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.975530 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"] Mar 12 13:41:07 crc kubenswrapper[4125]: I0312 13:41:07.976002 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg"] Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.065165 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e18989a-5a3c-4b45-8821-4b91287eaf1e" path="/var/lib/kubelet/pods/4e18989a-5a3c-4b45-8821-4b91287eaf1e/volumes" Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.067161 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd2f98e4-4656-4c95-8c6f-5959bd9f876a" path="/var/lib/kubelet/pods/dd2f98e4-4656-4c95-8c6f-5959bd9f876a/volumes" Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.322526 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.845498 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" event={"ID":"d3992789-6f8b-4806-8ce0-261a7623ca46","Type":"ContainerStarted","Data":"cf0c3066930a7ea28656a514bb3ebd66161f4ed2ff8ed7db66190309db8f370f"} Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.845896 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" 
event={"ID":"d3992789-6f8b-4806-8ce0-261a7623ca46","Type":"ContainerStarted","Data":"c11a66f18959f4d052940022a94a3309c6ecde2643a989a2b56b8abc198ced46"} Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.847646 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.850918 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-11-crc" event={"ID":"2bbbb77a-fabb-4250-a075-38a7c2a82752","Type":"ContainerStarted","Data":"df0218c32434328ced5b1e440a9e21d0002b52858d34dfb90ca32c204ecff0fc"} Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.855059 4125 patch_prober.go:28] interesting pod/controller-manager-7fdc5fd4dd-zdxlh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" start-of-body= Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.855127 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.863429 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" event={"ID":"017a4afc-9c30-4dc3-974a-05c3d1384017","Type":"ContainerStarted","Data":"50bc70f11c788ac2ff82783495b350392a81cc7e98b9f7c462821291b6fdc22d"} Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.863466 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" event={"ID":"017a4afc-9c30-4dc3-974a-05c3d1384017","Type":"ContainerStarted","Data":"f34a5dca1703e26ee4d890eeb701920bdeb2bee37d27ff144b2a2b900db72619"} Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.883802 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" containerID="cri-o://f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" gracePeriod=30 Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.890627 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:08 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:08 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:08 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:08 crc kubenswrapper[4125]: I0312 13:41:08.890707 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.147779 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" 
pod="openshift-kube-controller-manager/revision-pruner-11-crc" podStartSLOduration=18.147654513 podStartE2EDuration="18.147654513s" podCreationTimestamp="2026-03-12 13:40:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:09.132524604 +0000 UTC m=+1239.455910553" watchObservedRunningTime="2026-03-12 13:41:09.147654513 +0000 UTC m=+1239.471040512" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.157662 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podStartSLOduration=65.157613802 podStartE2EDuration="1m5.157613802s" podCreationTimestamp="2026-03-12 13:40:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:08.910259344 +0000 UTC m=+1239.233645483" watchObservedRunningTime="2026-03-12 13:41:09.157613802 +0000 UTC m=+1239.480999941" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.194440 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" podStartSLOduration=63.194382844 podStartE2EDuration="1m3.194382844s" podCreationTimestamp="2026-03-12 13:40:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:09.191940978 +0000 UTC m=+1239.515327017" watchObservedRunningTime="2026-03-12 13:41:09.194382844 +0000 UTC m=+1239.517768723" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.894760 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:09 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:09 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:09 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.894881 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerStarted","Data":"cbcab8cd5a54dc8d7f7df355680c50752e2e85f9c4166f8ccda95ee82cbb45c1"} Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.895924 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerStarted","Data":"1362b99663017571fe8abaffe9e10f5af0846868ea7d123a0bf9111d551c3bf5"} Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.896034 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.899794 4125 generic.go:334] "Generic (PLEG): container finished" podID="2bbbb77a-fabb-4250-a075-38a7c2a82752" containerID="df0218c32434328ced5b1e440a9e21d0002b52858d34dfb90ca32c204ecff0fc" exitCode=0 Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.901057 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/revision-pruner-11-crc" event={"ID":"2bbbb77a-fabb-4250-a075-38a7c2a82752","Type":"ContainerDied","Data":"df0218c32434328ced5b1e440a9e21d0002b52858d34dfb90ca32c204ecff0fc"} Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.901261 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.902027 4125 patch_prober.go:28] interesting pod/controller-manager-7fdc5fd4dd-zdxlh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" start-of-body= Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.902281 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.904985 4125 patch_prober.go:28] interesting pod/route-controller-manager-5fccd57d48-6m9cg container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": dial tcp 10.217.0.75:8443: connect: connection refused" start-of-body= Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.905037 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": dial tcp 10.217.0.75:8443: connect: connection refused" Mar 12 13:41:09 crc kubenswrapper[4125]: I0312 13:41:09.963939 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podStartSLOduration=202.963896011 podStartE2EDuration="3m22.963896011s" podCreationTimestamp="2026-03-12 13:37:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:09.959151943 +0000 UTC m=+1240.282537922" watchObservedRunningTime="2026-03-12 13:41:09.963896011 +0000 UTC m=+1240.287281920" Mar 12 13:41:10 crc kubenswrapper[4125]: I0312 13:41:10.889232 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:10 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:10 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:10 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:10 crc kubenswrapper[4125]: I0312 13:41:10.889382 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:10 crc kubenswrapper[4125]: I0312 13:41:10.917142 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:41:10 crc kubenswrapper[4125]: I0312 13:41:10.925713 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.398152 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.564286 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-11-crc" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.715857 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2bbbb77a-fabb-4250-a075-38a7c2a82752-kube-api-access\") pod \"2bbbb77a-fabb-4250-a075-38a7c2a82752\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.715974 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2bbbb77a-fabb-4250-a075-38a7c2a82752-kubelet-dir\") pod \"2bbbb77a-fabb-4250-a075-38a7c2a82752\" (UID: \"2bbbb77a-fabb-4250-a075-38a7c2a82752\") " Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.716332 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2bbbb77a-fabb-4250-a075-38a7c2a82752-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2bbbb77a-fabb-4250-a075-38a7c2a82752" (UID: "2bbbb77a-fabb-4250-a075-38a7c2a82752"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.724085 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bbbb77a-fabb-4250-a075-38a7c2a82752-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2bbbb77a-fabb-4250-a075-38a7c2a82752" (UID: "2bbbb77a-fabb-4250-a075-38a7c2a82752"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.817543 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2bbbb77a-fabb-4250-a075-38a7c2a82752-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.817610 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2bbbb77a-fabb-4250-a075-38a7c2a82752-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.890306 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:11 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:11 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:11 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.890438 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.918948 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-11-crc" event={"ID":"2bbbb77a-fabb-4250-a075-38a7c2a82752","Type":"ContainerDied","Data":"1ba10a737a7f5e7a08066769e6e95aa5ad7b89a7a5bcc48c193dbd15f1da822e"} Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.919045 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ba10a737a7f5e7a08066769e6e95aa5ad7b89a7a5bcc48c193dbd15f1da822e" Mar 12 13:41:11 crc kubenswrapper[4125]: I0312 13:41:11.919322 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-11-crc" Mar 12 13:41:12 crc kubenswrapper[4125]: I0312 13:41:12.888412 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:12 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:12 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:12 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:12 crc kubenswrapper[4125]: I0312 13:41:12.888583 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:13 crc kubenswrapper[4125]: I0312 13:41:13.601118 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:41:13 crc kubenswrapper[4125]: I0312 13:41:13.601545 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:41:13 crc kubenswrapper[4125]: I0312 13:41:13.893548 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:13 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:13 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:13 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:13 crc kubenswrapper[4125]: I0312 13:41:13.893642 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.817849 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.818285 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.832405 4125 patch_prober.go:28] interesting pod/apiserver-6cdf967d79-ffdf8 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]log ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]etcd ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 12 13:41:14 crc kubenswrapper[4125]: 
[+]poststarthook/image.openshift.io-apiserver-caches ok Mar 12 13:41:14 crc kubenswrapper[4125]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectcache ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startinformers ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-restmapperupdater ok Mar 12 13:41:14 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Mar 12 13:41:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.832504 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:14 crc kubenswrapper[4125]: E0312 13:41:14.833552 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:14 crc kubenswrapper[4125]: E0312 13:41:14.837534 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:14 crc kubenswrapper[4125]: E0312 13:41:14.844505 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:14 crc kubenswrapper[4125]: E0312 13:41:14.844596 4125 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.848509 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:14 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:14 crc kubenswrapper[4125]: > Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.886963 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:14 crc kubenswrapper[4125]: 
[-]has-synced failed: reason withheld Mar 12 13:41:14 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:14 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.887043 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.889762 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:14 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:14 crc kubenswrapper[4125]: > Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.915145 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Mar 12 13:41:14 crc kubenswrapper[4125]: I0312 13:41:14.915571 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="bf055e84f32193b9c1c21b0c34a61f01" containerName="startup-monitor" containerID="cri-o://b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288" gracePeriod=5 Mar 12 13:41:15 crc kubenswrapper[4125]: I0312 13:41:15.887112 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:15 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:15 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:15 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:15 crc kubenswrapper[4125]: I0312 13:41:15.888382 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:16 crc kubenswrapper[4125]: I0312 13:41:16.350323 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:16 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:16 crc kubenswrapper[4125]: > Mar 12 13:41:16 crc kubenswrapper[4125]: I0312 13:41:16.886726 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:16 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:16 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:16 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:16 crc kubenswrapper[4125]: I0312 13:41:16.886932 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:17 
crc kubenswrapper[4125]: I0312 13:41:17.886023 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:17 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:17 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:17 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:17 crc kubenswrapper[4125]: I0312 13:41:17.886142 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:18 crc kubenswrapper[4125]: I0312 13:41:18.885921 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:18 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:18 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:18 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:18 crc kubenswrapper[4125]: I0312 13:41:18.886118 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.705151 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/installer-11-crc"] Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.705738 4125 topology_manager.go:215] "Topology Admit Handler" podUID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" podNamespace="openshift-kube-controller-manager" podName="installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: E0312 13:41:19.711026 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bf055e84f32193b9c1c21b0c34a61f01" containerName="startup-monitor" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.711056 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf055e84f32193b9c1c21b0c34a61f01" containerName="startup-monitor" Mar 12 13:41:19 crc kubenswrapper[4125]: E0312 13:41:19.711098 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2bbbb77a-fabb-4250-a075-38a7c2a82752" containerName="pruner" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.711109 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bbbb77a-fabb-4250-a075-38a7c2a82752" containerName="pruner" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.711674 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bbbb77a-fabb-4250-a075-38a7c2a82752" containerName="pruner" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.712144 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf055e84f32193b9c1c21b0c34a61f01" containerName="startup-monitor" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.718678 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.740980 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-dl9g2" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.741430 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.806325 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/installer-11-crc"] Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.831172 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kubelet-dir\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.831341 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kube-api-access\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.831381 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-var-lock\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.844483 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.858438 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.887988 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:19 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:19 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:19 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.888997 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.932640 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kubelet-dir\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.932761 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kube-api-access\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.933084 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kubelet-dir\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.933171 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-var-lock\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:19 crc kubenswrapper[4125]: I0312 13:41:19.933290 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-var-lock\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:20 crc kubenswrapper[4125]: I0312 13:41:20.036975 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kube-api-access\") pod \"installer-11-crc\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:20 crc kubenswrapper[4125]: I0312 13:41:20.102871 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:20 crc kubenswrapper[4125]: I0312 13:41:20.893078 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:20 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:20 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:20 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:20 crc kubenswrapper[4125]: I0312 13:41:20.893169 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.079014 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_bf055e84f32193b9c1c21b0c34a61f01/startup-monitor/0.log" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.079388 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.079099 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_bf055e84f32193b9c1c21b0c34a61f01/startup-monitor/0.log" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.079570 4125 generic.go:334] "Generic (PLEG): container finished" podID="bf055e84f32193b9c1c21b0c34a61f01" containerID="b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288" exitCode=137 Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.079641 4125 scope.go:117] "RemoveContainer" containerID="b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.154570 4125 scope.go:117] "RemoveContainer" containerID="b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288" Mar 12 13:41:21 crc kubenswrapper[4125]: E0312 13:41:21.155425 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288\": container with ID starting with b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288 not found: ID does not exist" containerID="b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.155493 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288"} err="failed to get container status \"b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288\": rpc error: code = NotFound desc = could not find container \"b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288\": container with ID starting with b7c611c8bffe11bb82d6db66853df4d492f6025562e6cbd5e5f48a61e9a34288 not found: ID does not exist" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.158442 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/installer-11-crc"] Mar 12 13:41:21 crc kubenswrapper[4125]: W0312 13:41:21.185199 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod69bf4bb8_2a08_4fcb_991c_3aee35fb95f8.slice/crio-43e95d8b0bfba0673d5386534e6176488a785fbeac63aa34d0f5a3ea179dfd32 WatchSource:0}: Error finding container 43e95d8b0bfba0673d5386534e6176488a785fbeac63aa34d0f5a3ea179dfd32: Status 404 returned error can't find the container with id 43e95d8b0bfba0673d5386534e6176488a785fbeac63aa34d0f5a3ea179dfd32 Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.266954 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-lock\") pod \"bf055e84f32193b9c1c21b0c34a61f01\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267031 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-resource-dir\") pod \"bf055e84f32193b9c1c21b0c34a61f01\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267071 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: 
\"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-manifests\") pod \"bf055e84f32193b9c1c21b0c34a61f01\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267139 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-log\") pod \"bf055e84f32193b9c1c21b0c34a61f01\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267196 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-pod-resource-dir\") pod \"bf055e84f32193b9c1c21b0c34a61f01\" (UID: \"bf055e84f32193b9c1c21b0c34a61f01\") " Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267638 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "bf055e84f32193b9c1c21b0c34a61f01" (UID: "bf055e84f32193b9c1c21b0c34a61f01"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267680 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-lock" (OuterVolumeSpecName: "var-lock") pod "bf055e84f32193b9c1c21b0c34a61f01" (UID: "bf055e84f32193b9c1c21b0c34a61f01"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267711 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-log" (OuterVolumeSpecName: "var-log") pod "bf055e84f32193b9c1c21b0c34a61f01" (UID: "bf055e84f32193b9c1c21b0c34a61f01"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.267793 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-manifests" (OuterVolumeSpecName: "manifests") pod "bf055e84f32193b9c1c21b0c34a61f01" (UID: "bf055e84f32193b9c1c21b0c34a61f01"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.310108 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "bf055e84f32193b9c1c21b0c34a61f01" (UID: "bf055e84f32193b9c1c21b0c34a61f01"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.369153 4125 reconciler_common.go:300] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.369236 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-lock\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.369255 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-resource-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.369268 4125 reconciler_common.go:300] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-manifests\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.369278 4125 reconciler_common.go:300] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bf055e84f32193b9c1c21b0c34a61f01-var-log\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.889110 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:21 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:21 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:21 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:21 crc kubenswrapper[4125]: I0312 13:41:21.889271 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.035753 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf055e84f32193b9c1c21b0c34a61f01" path="/var/lib/kubelet/pods/bf055e84f32193b9c1c21b0c34a61f01/volumes" Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.037774 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.065709 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.065759 4125 kubelet.go:2639] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="f1a7e42a-bf16-4dc6-bbca-1e0aec66d939" Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.075462 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.075523 4125 kubelet.go:2663] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="f1a7e42a-bf16-4dc6-bbca-1e0aec66d939" Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.087392 4125 
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-11-crc" event={"ID":"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8","Type":"ContainerStarted","Data":"43e95d8b0bfba0673d5386534e6176488a785fbeac63aa34d0f5a3ea179dfd32"} Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.089375 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.886349 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:22 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:22 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:22 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:22 crc kubenswrapper[4125]: I0312 13:41:22.887146 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.102039 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-11-crc" event={"ID":"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8","Type":"ContainerStarted","Data":"7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7"} Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.105185 4125 generic.go:334] "Generic (PLEG): container finished" podID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerID="0f6e5f54ac6d5074ad57349154715c6834d0d6a57a215eb365926d06bde34837" exitCode=0 Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.105278 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerDied","Data":"0f6e5f54ac6d5074ad57349154715c6834d0d6a57a215eb365926d06bde34837"} Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.498026 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.598448 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.598531 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.621110 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8jhz6" Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.885780 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" 
start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:23 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:23 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:23 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:23 crc kubenswrapper[4125]: I0312 13:41:23.886194 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:24 crc kubenswrapper[4125]: I0312 13:41:24.459303 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:24 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:24 crc kubenswrapper[4125]: > Mar 12 13:41:24 crc kubenswrapper[4125]: I0312 13:41:24.629443 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/installer-11-crc" podStartSLOduration=5.629321363 podStartE2EDuration="5.629321363s" podCreationTimestamp="2026-03-12 13:41:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:24.360968188 +0000 UTC m=+1254.684354267" watchObservedRunningTime="2026-03-12 13:41:24.629321363 +0000 UTC m=+1254.952707402" Mar 12 13:41:24 crc kubenswrapper[4125]: E0312 13:41:24.835763 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:24 crc kubenswrapper[4125]: E0312 13:41:24.839323 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:24 crc kubenswrapper[4125]: E0312 13:41:24.841932 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:24 crc kubenswrapper[4125]: E0312 13:41:24.842319 4125 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" Mar 12 13:41:24 crc kubenswrapper[4125]: I0312 13:41:24.889720 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:24 crc kubenswrapper[4125]: [-]has-synced failed: 
reason withheld Mar 12 13:41:24 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:24 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:24 crc kubenswrapper[4125]: I0312 13:41:24.889943 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:25 crc kubenswrapper[4125]: I0312 13:41:25.151437 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerStarted","Data":"688690825c46f663e5a8bb2bb39ceab651e21decaf94ca342ebe2c0f14ee4f5c"} Mar 12 13:41:25 crc kubenswrapper[4125]: I0312 13:41:25.216487 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dcqzh" podStartSLOduration=53827264.1301418 podStartE2EDuration="14952h7m11.216389613s" podCreationTimestamp="2024-06-27 13:34:14 +0000 UTC" firstStartedPulling="2026-03-12 13:35:16.41435017 +0000 UTC m=+886.737735929" lastFinishedPulling="2026-03-12 13:41:23.500597942 +0000 UTC m=+1253.823983741" observedRunningTime="2026-03-12 13:41:25.196669332 +0000 UTC m=+1255.520055281" watchObservedRunningTime="2026-03-12 13:41:25.216389613 +0000 UTC m=+1255.539775442" Mar 12 13:41:25 crc kubenswrapper[4125]: I0312 13:41:25.235355 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:41:25 crc kubenswrapper[4125]: I0312 13:41:25.388898 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:41:25 crc kubenswrapper[4125]: I0312 13:41:25.887346 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:25 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:25 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:25 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:25 crc kubenswrapper[4125]: I0312 13:41:25.887545 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:26 crc kubenswrapper[4125]: I0312 13:41:26.885984 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:26 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:26 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:26 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:26 crc kubenswrapper[4125]: I0312 13:41:26.886327 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:27 crc kubenswrapper[4125]: I0312 13:41:27.896990 4125 patch_prober.go:28] 
interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:27 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:27 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:27 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:27 crc kubenswrapper[4125]: I0312 13:41:27.897343 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:28 crc kubenswrapper[4125]: I0312 13:41:28.902576 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:28 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:28 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:28 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:28 crc kubenswrapper[4125]: I0312 13:41:28.902753 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:29 crc kubenswrapper[4125]: I0312 13:41:29.038429 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" containerID="cri-o://9c5d3925adb66fb4aa984ebb038669e1253dd1ae4d86122b5b7a97e70cd77667" gracePeriod=15 Mar 12 13:41:29 crc kubenswrapper[4125]: I0312 13:41:29.886607 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:29 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:29 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:29 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:29 crc kubenswrapper[4125]: I0312 13:41:29.886963 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:30 crc kubenswrapper[4125]: I0312 13:41:30.194993 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84fccc7b6-mkncc_b233d916-bfe3-4ae5-ae39-6b574d1aa05e/console/0.log" Mar 12 13:41:30 crc kubenswrapper[4125]: I0312 13:41:30.195118 4125 generic.go:334] "Generic (PLEG): container finished" podID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerID="9c5d3925adb66fb4aa984ebb038669e1253dd1ae4d86122b5b7a97e70cd77667" exitCode=2 Mar 12 13:41:30 crc kubenswrapper[4125]: I0312 13:41:30.195177 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84fccc7b6-mkncc" event={"ID":"b233d916-bfe3-4ae5-ae39-6b574d1aa05e","Type":"ContainerDied","Data":"9c5d3925adb66fb4aa984ebb038669e1253dd1ae4d86122b5b7a97e70cd77667"} 
Mar 12 13:41:30 crc kubenswrapper[4125]: I0312 13:41:30.195253 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84fccc7b6-mkncc" event={"ID":"b233d916-bfe3-4ae5-ae39-6b574d1aa05e","Type":"ContainerStarted","Data":"4084a97de098c0bfdf0c87f17eba0ba708025fd1287e417cf06bd377c11722da"} Mar 12 13:41:30 crc kubenswrapper[4125]: I0312 13:41:30.887317 4125 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 12 13:41:30 crc kubenswrapper[4125]: [-]has-synced failed: reason withheld Mar 12 13:41:30 crc kubenswrapper[4125]: [+]process-running ok Mar 12 13:41:30 crc kubenswrapper[4125]: healthz check failed Mar 12 13:41:30 crc kubenswrapper[4125]: I0312 13:41:30.887730 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.436591 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.437520 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.437705 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.437964 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.438136 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.889589 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:41:31 crc kubenswrapper[4125]: I0312 13:41:31.895530 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.569276 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.598311 4125 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.598389 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.846468 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 12 13:41:33 crc 
kubenswrapper[4125]: I0312 13:41:33.846770 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.846906 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.849036 4125 patch_prober.go:28] interesting pod/console-84fccc7b6-mkncc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Mar 12 13:41:33 crc kubenswrapper[4125]: I0312 13:41:33.849186 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Mar 12 13:41:34 crc kubenswrapper[4125]: E0312 13:41:34.827080 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:34 crc kubenswrapper[4125]: E0312 13:41:34.829684 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:34 crc kubenswrapper[4125]: E0312 13:41:34.833891 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 12 13:41:34 crc kubenswrapper[4125]: E0312 13:41:34.833928 4125 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" Mar 12 13:41:35 crc kubenswrapper[4125]: I0312 13:41:35.012316 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:41:35 crc kubenswrapper[4125]: I0312 13:41:35.012410 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:41:36 crc kubenswrapper[4125]: I0312 13:41:36.692923 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:36 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:36 crc kubenswrapper[4125]: > Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.133573 4125 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-rjfwq_49b2f272-4389-4573-ac63-135cbe6ca129/kube-multus-additional-cni-plugins/0.log" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.134076 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.221308 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmnd4\" (UniqueName: \"kubernetes.io/projected/49b2f272-4389-4573-ac63-135cbe6ca129-kube-api-access-kmnd4\") pod \"49b2f272-4389-4573-ac63-135cbe6ca129\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.221410 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/49b2f272-4389-4573-ac63-135cbe6ca129-ready\") pod \"49b2f272-4389-4573-ac63-135cbe6ca129\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.221450 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/49b2f272-4389-4573-ac63-135cbe6ca129-cni-sysctl-allowlist\") pod \"49b2f272-4389-4573-ac63-135cbe6ca129\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.221473 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b2f272-4389-4573-ac63-135cbe6ca129-tuning-conf-dir\") pod \"49b2f272-4389-4573-ac63-135cbe6ca129\" (UID: \"49b2f272-4389-4573-ac63-135cbe6ca129\") " Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.221636 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49b2f272-4389-4573-ac63-135cbe6ca129-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "49b2f272-4389-4573-ac63-135cbe6ca129" (UID: "49b2f272-4389-4573-ac63-135cbe6ca129"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.222789 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49b2f272-4389-4573-ac63-135cbe6ca129-ready" (OuterVolumeSpecName: "ready") pod "49b2f272-4389-4573-ac63-135cbe6ca129" (UID: "49b2f272-4389-4573-ac63-135cbe6ca129"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.223262 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49b2f272-4389-4573-ac63-135cbe6ca129-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "49b2f272-4389-4573-ac63-135cbe6ca129" (UID: "49b2f272-4389-4573-ac63-135cbe6ca129"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.238609 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49b2f272-4389-4573-ac63-135cbe6ca129-kube-api-access-kmnd4" (OuterVolumeSpecName: "kube-api-access-kmnd4") pod "49b2f272-4389-4573-ac63-135cbe6ca129" (UID: "49b2f272-4389-4573-ac63-135cbe6ca129"). InnerVolumeSpecName "kube-api-access-kmnd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:39 crc kubenswrapper[4125]: E0312 13:41:39.256738 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49b2f272_4389_4573_ac63_135cbe6ca129.slice/crio-conmon-f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.277420 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-rjfwq_49b2f272-4389-4573-ac63-135cbe6ca129/kube-multus-additional-cni-plugins/0.log" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.277489 4125 generic.go:334] "Generic (PLEG): container finished" podID="49b2f272-4389-4573-ac63-135cbe6ca129" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" exitCode=137 Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.277524 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" event={"ID":"49b2f272-4389-4573-ac63-135cbe6ca129","Type":"ContainerDied","Data":"f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924"} Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.277553 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" event={"ID":"49b2f272-4389-4573-ac63-135cbe6ca129","Type":"ContainerDied","Data":"0485f9547c53ead05fa83e3b5da8528c100af5fef0ed49d3aae3df93d2c253b4"} Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.277579 4125 scope.go:117] "RemoveContainer" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.277687 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rjfwq" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.328404 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kmnd4\" (UniqueName: \"kubernetes.io/projected/49b2f272-4389-4573-ac63-135cbe6ca129-kube-api-access-kmnd4\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.328456 4125 reconciler_common.go:300] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/49b2f272-4389-4573-ac63-135cbe6ca129-ready\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.328475 4125 reconciler_common.go:300] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/49b2f272-4389-4573-ac63-135cbe6ca129-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.328487 4125 reconciler_common.go:300] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b2f272-4389-4573-ac63-135cbe6ca129-tuning-conf-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.357278 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rjfwq"] Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.364376 4125 scope.go:117] "RemoveContainer" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" Mar 12 13:41:39 crc kubenswrapper[4125]: E0312 13:41:39.365015 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924\": container with ID starting with f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924 not found: ID does not exist" containerID="f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.365080 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924"} err="failed to get container status \"f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924\": rpc error: code = NotFound desc = could not find container \"f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924\": container with ID starting with f5c9caa392ec822543d34a82b1482480fb9a0eb7d7f8500a1a31106087d3a924 not found: ID does not exist" Mar 12 13:41:39 crc kubenswrapper[4125]: I0312 13:41:39.370355 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rjfwq"] Mar 12 13:41:40 crc kubenswrapper[4125]: I0312 13:41:40.035405 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" path="/var/lib/kubelet/pods/49b2f272-4389-4573-ac63-135cbe6ca129/volumes" Mar 12 13:41:43 crc kubenswrapper[4125]: I0312 13:41:43.627261 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-8568c59db8-fspjn" Mar 12 13:41:43 crc kubenswrapper[4125]: I0312 13:41:43.650068 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-8568c59db8-fspjn" Mar 12 13:41:43 crc kubenswrapper[4125]: I0312 13:41:43.853382 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:41:43 crc kubenswrapper[4125]: I0312 13:41:43.890281 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:41:43 crc kubenswrapper[4125]: I0312 13:41:43.897489 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-console/console-84fccc7b6-mkncc"] Mar 12 13:41:44 crc kubenswrapper[4125]: I0312 13:41:44.535305 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k9qqb"] Mar 12 13:41:44 crc kubenswrapper[4125]: I0312 13:41:44.535768 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" containerID="cri-o://804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9" gracePeriod=2 Mar 12 13:41:44 crc kubenswrapper[4125]: I0312 13:41:44.550695 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g4v97"] Mar 12 13:41:44 crc kubenswrapper[4125]: I0312 13:41:44.551587 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="registry-server" containerID="cri-o://73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48" gracePeriod=2 Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.021203 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9 is running failed: container process not found" containerID="804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9" cmd=["grpc_health_probe","-addr=:50051"] Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.030687 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9 is running failed: container process not found" containerID="804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9" cmd=["grpc_health_probe","-addr=:50051"] Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.031108 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9 is running failed: container process not found" containerID="804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9" cmd=["grpc_health_probe","-addr=:50051"] Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.031141 4125 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-k9qqb" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.054248 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking 
if PID of 73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48 is running failed: container process not found" containerID="73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48" cmd=["grpc_health_probe","-addr=:50051"] Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.056806 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48 is running failed: container process not found" containerID="73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48" cmd=["grpc_health_probe","-addr=:50051"] Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.060635 4125 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48 is running failed: container process not found" containerID="73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48" cmd=["grpc_health_probe","-addr=:50051"] Mar 12 13:41:45 crc kubenswrapper[4125]: E0312 13:41:45.060734 4125 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-g4v97" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="registry-server" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.324412 4125 generic.go:334] "Generic (PLEG): container finished" podID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerID="804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9" exitCode=0 Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.324528 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerDied","Data":"804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9"} Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.327735 4125 generic.go:334] "Generic (PLEG): container finished" podID="bb917686-edfb-4158-86ad-6fce0abec64c" containerID="73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48" exitCode=0 Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.328080 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerDied","Data":"73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48"} Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.328107 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4v97" event={"ID":"bb917686-edfb-4158-86ad-6fce0abec64c","Type":"ContainerDied","Data":"2763af634f4d669b92332b1dbf6affd01dcf18c5923f4fea2204666a30c0374c"} Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.328133 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2763af634f4d669b92332b1dbf6affd01dcf18c5923f4fea2204666a30c0374c" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.365621 4125 util.go:48] "No ready sandbox for pod can be found. 
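
The readiness probe these ExecSync errors come from is cmd=["grpc_health_probe","-addr=:50051"], a client for the standard gRPC health-checking protocol run inside the registry-server containers; the NotFound errors are just that exec racing with containers that are already stopping. A minimal Go equivalent of the probe itself, using the stock grpc-go health API (localhost target assumed for illustration):

    package main

    import (
        "context"
        "fmt"
        "os"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
        // Mirror the probe's 1s budget; on timeout the kubelet log shows
        // `timeout: failed to connect service ":50051" within 1s`.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        conn, err := grpc.DialContext(ctx, "localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
        if err != nil {
            fmt.Println(`timeout: failed to connect service ":50051" within 1s`)
            os.Exit(1)
        }
        defer conn.Close()
        resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
        if err != nil || resp.Status != healthpb.HealthCheckResponse_SERVING {
            os.Exit(1) // probe failure -> pod stays unready
        }
        fmt.Println("status: SERVING")
    }
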
Need to start a new one" pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.443031 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-utilities\") pod \"bb917686-edfb-4158-86ad-6fce0abec64c\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.443097 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-catalog-content\") pod \"bb917686-edfb-4158-86ad-6fce0abec64c\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.443164 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwzcr\" (UniqueName: \"kubernetes.io/projected/bb917686-edfb-4158-86ad-6fce0abec64c-kube-api-access-mwzcr\") pod \"bb917686-edfb-4158-86ad-6fce0abec64c\" (UID: \"bb917686-edfb-4158-86ad-6fce0abec64c\") " Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.449450 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-utilities" (OuterVolumeSpecName: "utilities") pod "bb917686-edfb-4158-86ad-6fce0abec64c" (UID: "bb917686-edfb-4158-86ad-6fce0abec64c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.468708 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb917686-edfb-4158-86ad-6fce0abec64c-kube-api-access-mwzcr" (OuterVolumeSpecName: "kube-api-access-mwzcr") pod "bb917686-edfb-4158-86ad-6fce0abec64c" (UID: "bb917686-edfb-4158-86ad-6fce0abec64c"). InnerVolumeSpecName "kube-api-access-mwzcr". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.516052 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.548909 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-catalog-content\") pod \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.549464 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-utilities\") pod \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.549665 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n59fs\" (UniqueName: \"kubernetes.io/projected/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-kube-api-access-n59fs\") pod \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\" (UID: \"ccdf38cf-634a-41a2-9c8b-74bb86af80a7\") " Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.550342 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.550491 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mwzcr\" (UniqueName: \"kubernetes.io/projected/bb917686-edfb-4158-86ad-6fce0abec64c-kube-api-access-mwzcr\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.560613 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-utilities" (OuterVolumeSpecName: "utilities") pod "ccdf38cf-634a-41a2-9c8b-74bb86af80a7" (UID: "ccdf38cf-634a-41a2-9c8b-74bb86af80a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.577936 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-kube-api-access-n59fs" (OuterVolumeSpecName: "kube-api-access-n59fs") pod "ccdf38cf-634a-41a2-9c8b-74bb86af80a7" (UID: "ccdf38cf-634a-41a2-9c8b-74bb86af80a7"). InnerVolumeSpecName "kube-api-access-n59fs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.622478 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rmwfn"] Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.622732 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rmwfn" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="registry-server" containerID="cri-o://b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87" gracePeriod=2 Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.652303 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.652352 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-n59fs\" (UniqueName: \"kubernetes.io/projected/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-kube-api-access-n59fs\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.899981 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb917686-edfb-4158-86ad-6fce0abec64c" (UID: "bb917686-edfb-4158-86ad-6fce0abec64c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:45 crc kubenswrapper[4125]: I0312 13:41:45.955998 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb917686-edfb-4158-86ad-6fce0abec64c-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.230923 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:46 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:46 crc kubenswrapper[4125]: > Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.263701 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ccdf38cf-634a-41a2-9c8b-74bb86af80a7" (UID: "ccdf38cf-634a-41a2-9c8b-74bb86af80a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.287362 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.335732 4125 generic.go:334] "Generic (PLEG): container finished" podID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerID="b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87" exitCode=0 Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.335873 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerDied","Data":"b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87"} Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.335909 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rmwfn" event={"ID":"9ad279b4-d9dc-42a8-a1c8-a002bd063482","Type":"ContainerDied","Data":"be67d2307407975fa80988e5323b61faf043ef1f8ce17b7f937137e83832bdf8"} Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.335934 4125 scope.go:117] "RemoveContainer" containerID="b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.336091 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rmwfn" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.354196 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g4v97" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.354306 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k9qqb" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.354796 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k9qqb" event={"ID":"ccdf38cf-634a-41a2-9c8b-74bb86af80a7","Type":"ContainerDied","Data":"049cfa0c7f506db7d8a3962a506f6c7dbfe4b3489281fd37180c3bf06491ac72"} Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.362073 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") pod \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.362164 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-utilities\") pod \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.362236 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-catalog-content\") pod \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\" (UID: \"9ad279b4-d9dc-42a8-a1c8-a002bd063482\") " Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.362415 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccdf38cf-634a-41a2-9c8b-74bb86af80a7-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.367163 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-utilities" (OuterVolumeSpecName: "utilities") pod "9ad279b4-d9dc-42a8-a1c8-a002bd063482" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.380170 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp" (OuterVolumeSpecName: "kube-api-access-r7dbp") pod "9ad279b4-d9dc-42a8-a1c8-a002bd063482" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482"). InnerVolumeSpecName "kube-api-access-r7dbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.439137 4125 scope.go:117] "RemoveContainer" containerID="bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.464143 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.464196 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-r7dbp\" (UniqueName: \"kubernetes.io/projected/9ad279b4-d9dc-42a8-a1c8-a002bd063482-kube-api-access-r7dbp\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.505597 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ad279b4-d9dc-42a8-a1c8-a002bd063482" (UID: "9ad279b4-d9dc-42a8-a1c8-a002bd063482"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.561881 4125 scope.go:117] "RemoveContainer" containerID="d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.565542 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ad279b4-d9dc-42a8-a1c8-a002bd063482-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.571747 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g4v97"] Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.590759 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g4v97"] Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.629104 4125 scope.go:117] "RemoveContainer" containerID="b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87" Mar 12 13:41:46 crc kubenswrapper[4125]: E0312 13:41:46.631346 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87\": container with ID starting with b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87 not found: ID does not exist" containerID="b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.631408 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87"} err="failed to get container status \"b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87\": rpc error: code = NotFound desc = could not find container \"b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87\": container with ID starting with b3462ec3ecccca783e759d701034de888c3a6074065af37ce724cc2263906a87 not found: ID does not exist" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.631425 4125 scope.go:117] "RemoveContainer" containerID="bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f" Mar 12 13:41:46 crc kubenswrapper[4125]: E0312 13:41:46.632036 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f\": container with ID starting with bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f not found: ID does not exist" containerID="bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.632065 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f"} err="failed to get container status \"bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f\": rpc error: code = NotFound desc = could not find container \"bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f\": container with ID starting with bdcbac972870cae21b959cca005a9b6eb115e47625ea8b1ea22a2aa13ff3139f not found: ID does not exist" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.632079 4125 scope.go:117] "RemoveContainer" containerID="d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594" Mar 12 13:41:46 crc 
kubenswrapper[4125]: E0312 13:41:46.633028 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594\": container with ID starting with d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594 not found: ID does not exist" containerID="d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.633073 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594"} err="failed to get container status \"d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594\": rpc error: code = NotFound desc = could not find container \"d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594\": container with ID starting with d8350ccfaf8258763ceb736329e855a6ace1f5abf4a273753e0f24679e4d5594 not found: ID does not exist" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.633086 4125 scope.go:117] "RemoveContainer" containerID="804e97e0f43a64898481bb2d40125a76fb5991e8f2380788d44c274c75b1b1b9" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.667068 4125 scope.go:117] "RemoveContainer" containerID="ae54151489cfc34992d37ed0b8ac57e695a64170c18915ba01de6399209ce42d" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.715775 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k9qqb"] Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.729806 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k9qqb"] Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.748078 4125 scope.go:117] "RemoveContainer" containerID="56a5bc6b4dee41dcddef32e21164697c824160c5a4c34705a0ad0d4b6748294b" Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.774452 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rmwfn"] Mar 12 13:41:46 crc kubenswrapper[4125]: I0312 13:41:46.781086 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rmwfn"] Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.273551 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z4l9z"] Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.273948 4125 topology_manager.go:215] "Topology Admit Handler" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" podNamespace="openshift-marketplace" podName="certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274120 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274142 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274159 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274168 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274180 4125 
cpu_manager.go:396] "RemoveStaleState: removing container" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274188 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274201 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="extract-content" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274271 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="extract-content" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274289 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="extract-content" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274298 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="extract-content" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274307 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="extract-utilities" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274314 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="extract-utilities" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274326 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="extract-utilities" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274333 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="extract-utilities" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274344 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="extract-utilities" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274352 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="extract-utilities" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274364 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="extract-content" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274371 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" containerName="extract-content" Mar 12 13:41:47 crc kubenswrapper[4125]: E0312 13:41:47.274393 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274402 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274564 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274582 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" 
containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274595 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b2f272-4389-4573-ac63-135cbe6ca129" containerName="kube-multus-additional-cni-plugins" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.274610 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" containerName="registry-server" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.275491 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.309407 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z4l9z"] Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.378786 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-catalog-content\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.378906 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-utilities\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.378940 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xhxv\" (UniqueName: \"kubernetes.io/projected/2a9e1488-e852-4113-90a0-4177e0e57ee0-kube-api-access-8xhxv\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.479782 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-catalog-content\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.480290 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-utilities\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.480874 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8xhxv\" (UniqueName: \"kubernetes.io/projected/2a9e1488-e852-4113-90a0-4177e0e57ee0-kube-api-access-8xhxv\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.480403 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-catalog-content\") pod \"certified-operators-z4l9z\" (UID: 
\"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.480767 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-utilities\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.525945 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xhxv\" (UniqueName: \"kubernetes.io/projected/2a9e1488-e852-4113-90a0-4177e0e57ee0-kube-api-access-8xhxv\") pod \"certified-operators-z4l9z\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:47 crc kubenswrapper[4125]: I0312 13:41:47.592765 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.033921 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ad279b4-d9dc-42a8-a1c8-a002bd063482" path="/var/lib/kubelet/pods/9ad279b4-d9dc-42a8-a1c8-a002bd063482/volumes" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.037483 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb917686-edfb-4158-86ad-6fce0abec64c" path="/var/lib/kubelet/pods/bb917686-edfb-4158-86ad-6fce0abec64c/volumes" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.038255 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccdf38cf-634a-41a2-9c8b-74bb86af80a7" path="/var/lib/kubelet/pods/ccdf38cf-634a-41a2-9c8b-74bb86af80a7/volumes" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.230726 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z4l9z"] Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.373887 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerStarted","Data":"d911511ce5ba6cd77949e2d7666d91f7e960862ef86ff3fe038ebf7ce364fe7f"} Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.864490 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l4l79"] Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.864620 4125 topology_manager.go:215] "Topology Admit Handler" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" podNamespace="openshift-marketplace" podName="redhat-marketplace-l4l79" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.865884 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.904673 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-catalog-content\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.905031 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdsws\" (UniqueName: \"kubernetes.io/projected/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-kube-api-access-gdsws\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.905242 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-utilities\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:48 crc kubenswrapper[4125]: I0312 13:41:48.948088 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4l79"] Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.006293 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-utilities\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.006387 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-catalog-content\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.006435 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gdsws\" (UniqueName: \"kubernetes.io/projected/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-kube-api-access-gdsws\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.006967 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-utilities\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.007021 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-catalog-content\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.045195 4125 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gdsws\" (UniqueName: \"kubernetes.io/projected/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-kube-api-access-gdsws\") pod \"redhat-marketplace-l4l79\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.182475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.384667 4125 generic.go:334] "Generic (PLEG): container finished" podID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerID="ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c" exitCode=0 Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.385344 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerDied","Data":"ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c"} Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.459245 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rbnzk"] Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.459403 4125 topology_manager.go:215] "Topology Admit Handler" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" podNamespace="openshift-marketplace" podName="community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.460475 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.513756 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xnb2\" (UniqueName: \"kubernetes.io/projected/813f238a-6e8e-480c-b510-22c9c49689e3-kube-api-access-2xnb2\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.514359 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-utilities\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.514513 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-catalog-content\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.518023 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rbnzk"] Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.615065 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2xnb2\" (UniqueName: \"kubernetes.io/projected/813f238a-6e8e-480c-b510-22c9c49689e3-kube-api-access-2xnb2\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.615154 4125 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-utilities\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.615189 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-catalog-content\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.615767 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-catalog-content\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.616424 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-utilities\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.655723 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xnb2\" (UniqueName: \"kubernetes.io/projected/813f238a-6e8e-480c-b510-22c9c49689e3-kube-api-access-2xnb2\") pod \"community-operators-rbnzk\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") " pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:49 crc kubenswrapper[4125]: I0312 13:41:49.804893 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:41:50 crc kubenswrapper[4125]: I0312 13:41:50.013787 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4l79"] Mar 12 13:41:50 crc kubenswrapper[4125]: I0312 13:41:50.395318 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerStarted","Data":"f10fd7717ae846ecd32352468ba2b98d97229d9c5473b1112ebcef63fb6e9849"} Mar 12 13:41:50 crc kubenswrapper[4125]: I0312 13:41:50.400010 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerStarted","Data":"4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87"} Mar 12 13:41:50 crc kubenswrapper[4125]: I0312 13:41:50.478640 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rbnzk"] Mar 12 13:41:50 crc kubenswrapper[4125]: W0312 13:41:50.495905 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod813f238a_6e8e_480c_b510_22c9c49689e3.slice/crio-a65f7702d7a67f8a62508d28d163fbf07fccf5e6321514bfa99ccffa75a13e8c WatchSource:0}: Error finding container a65f7702d7a67f8a62508d28d163fbf07fccf5e6321514bfa99ccffa75a13e8c: Status 404 returned error can't find the container with id a65f7702d7a67f8a62508d28d163fbf07fccf5e6321514bfa99ccffa75a13e8c Mar 12 13:41:51 crc kubenswrapper[4125]: I0312 13:41:51.407427 4125 generic.go:334] "Generic (PLEG): container finished" podID="813f238a-6e8e-480c-b510-22c9c49689e3" containerID="229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b" exitCode=0 Mar 12 13:41:51 crc kubenswrapper[4125]: I0312 13:41:51.407515 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerDied","Data":"229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b"} Mar 12 13:41:51 crc kubenswrapper[4125]: I0312 13:41:51.407543 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerStarted","Data":"a65f7702d7a67f8a62508d28d163fbf07fccf5e6321514bfa99ccffa75a13e8c"} Mar 12 13:41:51 crc kubenswrapper[4125]: I0312 13:41:51.411700 4125 generic.go:334] "Generic (PLEG): container finished" podID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerID="11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776" exitCode=0 Mar 12 13:41:51 crc kubenswrapper[4125]: I0312 13:41:51.412661 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerDied","Data":"11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776"} Mar 12 13:41:52 crc kubenswrapper[4125]: I0312 13:41:52.428941 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerStarted","Data":"706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5"} Mar 12 13:41:53 crc kubenswrapper[4125]: I0312 13:41:53.450102 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerStarted","Data":"ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb"} Mar 12 13:41:54 crc kubenswrapper[4125]: I0312 13:41:54.417333 4125 scope.go:117] "RemoveContainer" containerID="8066f3b20a26cdaa33f85973f1b4e4f5fa47ef57fcee496150d1801066272f41" Mar 12 13:41:55 crc kubenswrapper[4125]: I0312 13:41:55.982569 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-12-crc"] Mar 12 13:41:55 crc kubenswrapper[4125]: I0312 13:41:55.983003 4125 topology_manager.go:215] "Topology Admit Handler" podUID="e82cfcea-bcd5-4d25-9d17-4978f4452a3a" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-12-crc" Mar 12 13:41:55 crc kubenswrapper[4125]: I0312 13:41:55.983748 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.058782 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.058932 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.160329 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.160405 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.160794 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.338623 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" probeResult="failure" output=< Mar 12 13:41:56 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:41:56 crc kubenswrapper[4125]: > Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.517563 4125 kubelet.go:2445] "SyncLoop DELETE" 
source="api" pods=["openshift-kube-controller-manager/installer-11-crc"] Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.517982 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/installer-11-crc" podUID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" containerName="installer" containerID="cri-o://7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7" gracePeriod=30 Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.543763 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-12-crc"] Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.579779 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:56 crc kubenswrapper[4125]: I0312 13:41:56.602613 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.308195 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-12-crc"] Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.413916 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_installer-11-crc_69bf4bb8-2a08-4fcb-991c-3aee35fb95f8/installer/0.log" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.414315 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.497942 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-12-crc" event={"ID":"e82cfcea-bcd5-4d25-9d17-4978f4452a3a","Type":"ContainerStarted","Data":"fc08e19f1a7bc38a92738d4b111c2720d7a6d9ca5549bad7dd1d3010f3c9bb5f"} Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.504515 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_installer-11-crc_69bf4bb8-2a08-4fcb-991c-3aee35fb95f8/installer/0.log" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.504577 4125 generic.go:334] "Generic (PLEG): container finished" podID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" containerID="7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7" exitCode=1 Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.504607 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-11-crc" event={"ID":"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8","Type":"ContainerDied","Data":"7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7"} Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.504634 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-11-crc" event={"ID":"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8","Type":"ContainerDied","Data":"43e95d8b0bfba0673d5386534e6176488a785fbeac63aa34d0f5a3ea179dfd32"} Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.504685 4125 scope.go:117] "RemoveContainer" containerID="7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.504800 
4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-11-crc" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.590054 4125 scope.go:117] "RemoveContainer" containerID="7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7" Mar 12 13:41:57 crc kubenswrapper[4125]: E0312 13:41:57.590912 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7\": container with ID starting with 7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7 not found: ID does not exist" containerID="7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.590976 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7"} err="failed to get container status \"7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7\": rpc error: code = NotFound desc = could not find container \"7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7\": container with ID starting with 7d65f5fdeb75b957a1499d92f50951d5637f71074bacaa85a816e58a20dc0ee7 not found: ID does not exist" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.600461 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kubelet-dir\") pod \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.600556 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-var-lock\") pod \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.600609 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kube-api-access\") pod \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\" (UID: \"69bf4bb8-2a08-4fcb-991c-3aee35fb95f8\") " Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.600933 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" (UID: "69bf4bb8-2a08-4fcb-991c-3aee35fb95f8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.601023 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-var-lock" (OuterVolumeSpecName: "var-lock") pod "69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" (UID: "69bf4bb8-2a08-4fcb-991c-3aee35fb95f8"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.622863 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" (UID: "69bf4bb8-2a08-4fcb-991c-3aee35fb95f8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.702423 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.702498 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-var-lock\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.702518 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.869933 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-controller-manager/installer-11-crc"] Mar 12 13:41:57 crc kubenswrapper[4125]: I0312 13:41:57.889422 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-controller-manager/installer-11-crc"] Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.033463 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" path="/var/lib/kubelet/pods/69bf4bb8-2a08-4fcb-991c-3aee35fb95f8/volumes" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.515039 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-12-crc" event={"ID":"e82cfcea-bcd5-4d25-9d17-4978f4452a3a","Type":"ContainerStarted","Data":"7cbd1ae643d3269317d0c80535bea4f014e4f8d3e78bf454059b547187b29f7c"} Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.563010 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-12-crc" podStartSLOduration=3.5629605890000002 podStartE2EDuration="3.562960589s" podCreationTimestamp="2026-03-12 13:41:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:41:58.558951583 +0000 UTC m=+1288.882337782" watchObservedRunningTime="2026-03-12 13:41:58.562960589 +0000 UTC m=+1288.886346588" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.671546 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/installer-12-crc"] Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.671687 4125 topology_manager.go:215] "Topology Admit Handler" podUID="334fb4b6-16b9-453a-9208-846feab2a2fa" podNamespace="openshift-kube-controller-manager" podName="installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: E0312 13:41:58.671947 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" containerName="installer" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.671966 4125 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" containerName="installer" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.672098 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="69bf4bb8-2a08-4fcb-991c-3aee35fb95f8" containerName="installer" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.672641 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.701606 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/installer-12-crc"] Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.744406 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334fb4b6-16b9-453a-9208-846feab2a2fa-kube-api-access\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.744532 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-kubelet-dir\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.744559 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-var-lock\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.845295 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334fb4b6-16b9-453a-9208-846feab2a2fa-kube-api-access\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.845394 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-kubelet-dir\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.845425 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-var-lock\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.845516 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-var-lock\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.845560 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-kubelet-dir\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.878168 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334fb4b6-16b9-453a-9208-846feab2a2fa-kube-api-access\") pod \"installer-12-crc\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:58 crc kubenswrapper[4125]: I0312 13:41:58.994399 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:41:59 crc kubenswrapper[4125]: I0312 13:41:59.524340 4125 generic.go:334] "Generic (PLEG): container finished" podID="e82cfcea-bcd5-4d25-9d17-4978f4452a3a" containerID="7cbd1ae643d3269317d0c80535bea4f014e4f8d3e78bf454059b547187b29f7c" exitCode=0 Mar 12 13:41:59 crc kubenswrapper[4125]: I0312 13:41:59.524419 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-12-crc" event={"ID":"e82cfcea-bcd5-4d25-9d17-4978f4452a3a","Type":"ContainerDied","Data":"7cbd1ae643d3269317d0c80535bea4f014e4f8d3e78bf454059b547187b29f7c"} Mar 12 13:42:00 crc kubenswrapper[4125]: I0312 13:42:00.854499 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/installer-12-crc"] Mar 12 13:42:00 crc kubenswrapper[4125]: W0312 13:42:00.923328 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod334fb4b6_16b9_453a_9208_846feab2a2fa.slice/crio-2daa2e881606d6ecba509a68be989bcaffc1982acd45dce1d3626632bc85e420 WatchSource:0}: Error finding container 2daa2e881606d6ecba509a68be989bcaffc1982acd45dce1d3626632bc85e420: Status 404 returned error can't find the container with id 2daa2e881606d6ecba509a68be989bcaffc1982acd45dce1d3626632bc85e420 Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.497528 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.542519 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-12-crc" event={"ID":"e82cfcea-bcd5-4d25-9d17-4978f4452a3a","Type":"ContainerDied","Data":"fc08e19f1a7bc38a92738d4b111c2720d7a6d9ca5549bad7dd1d3010f3c9bb5f"} Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.542586 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-12-crc" Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.542619 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc08e19f1a7bc38a92738d4b111c2720d7a6d9ca5549bad7dd1d3010f3c9bb5f" Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.544345 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-12-crc" event={"ID":"334fb4b6-16b9-453a-9208-846feab2a2fa","Type":"ContainerStarted","Data":"2daa2e881606d6ecba509a68be989bcaffc1982acd45dce1d3626632bc85e420"} Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.696367 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kubelet-dir\") pod \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.696531 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e82cfcea-bcd5-4d25-9d17-4978f4452a3a" (UID: "e82cfcea-bcd5-4d25-9d17-4978f4452a3a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.696553 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kube-api-access\") pod \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\" (UID: \"e82cfcea-bcd5-4d25-9d17-4978f4452a3a\") " Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.697180 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.703540 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e82cfcea-bcd5-4d25-9d17-4978f4452a3a" (UID: "e82cfcea-bcd5-4d25-9d17-4978f4452a3a"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:01 crc kubenswrapper[4125]: I0312 13:42:01.798057 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e82cfcea-bcd5-4d25-9d17-4978f4452a3a-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:02 crc kubenswrapper[4125]: I0312 13:42:02.594620 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-12-crc" event={"ID":"334fb4b6-16b9-453a-9208-846feab2a2fa","Type":"ContainerStarted","Data":"cc53439518c5ce30d5f21f182449aa7d6203ce6faea070900253a04d3e70a104"} Mar 12 13:42:02 crc kubenswrapper[4125]: I0312 13:42:02.649235 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/installer-12-crc" podStartSLOduration=4.649117503 podStartE2EDuration="4.649117503s" podCreationTimestamp="2026-03-12 13:41:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:42:02.645500419 +0000 UTC m=+1292.968886359" watchObservedRunningTime="2026-03-12 13:42:02.649117503 +0000 UTC m=+1292.972503422" Mar 12 13:42:04 crc kubenswrapper[4125]: I0312 13:42:04.622539 4125 generic.go:334] "Generic (PLEG): container finished" podID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerID="ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb" exitCode=0 Mar 12 13:42:04 crc kubenswrapper[4125]: I0312 13:42:04.622689 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerDied","Data":"ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb"} Mar 12 13:42:05 crc kubenswrapper[4125]: I0312 13:42:05.238289 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:42:05 crc kubenswrapper[4125]: I0312 13:42:05.428274 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:42:05 crc kubenswrapper[4125]: I0312 13:42:05.965594 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dcqzh"] Mar 12 13:42:06 crc kubenswrapper[4125]: I0312 13:42:06.639139 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerStarted","Data":"0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563"} Mar 12 13:42:06 crc kubenswrapper[4125]: I0312 13:42:06.639251 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dcqzh" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" containerID="cri-o://688690825c46f663e5a8bb2bb39ceab651e21decaf94ca342ebe2c0f14ee4f5c" gracePeriod=2 Mar 12 13:42:06 crc kubenswrapper[4125]: I0312 13:42:06.714464 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l4l79" podStartSLOduration=5.187075939 podStartE2EDuration="18.714418328s" podCreationTimestamp="2026-03-12 13:41:48 +0000 UTC" firstStartedPulling="2026-03-12 13:41:51.413458597 +0000 UTC m=+1281.736844426" lastFinishedPulling="2026-03-12 13:42:04.940800936 +0000 UTC m=+1295.264186815" observedRunningTime="2026-03-12 
13:42:06.711148585 +0000 UTC m=+1297.034534574" watchObservedRunningTime="2026-03-12 13:42:06.714418328 +0000 UTC m=+1297.037804228" Mar 12 13:42:07 crc kubenswrapper[4125]: I0312 13:42:07.667594 4125 generic.go:334] "Generic (PLEG): container finished" podID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerID="688690825c46f663e5a8bb2bb39ceab651e21decaf94ca342ebe2c0f14ee4f5c" exitCode=0 Mar 12 13:42:07 crc kubenswrapper[4125]: I0312 13:42:07.667763 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerDied","Data":"688690825c46f663e5a8bb2bb39ceab651e21decaf94ca342ebe2c0f14ee4f5c"} Mar 12 13:42:07 crc kubenswrapper[4125]: I0312 13:42:07.687387 4125 generic.go:334] "Generic (PLEG): container finished" podID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerID="4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87" exitCode=0 Mar 12 13:42:07 crc kubenswrapper[4125]: I0312 13:42:07.687440 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerDied","Data":"4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87"} Mar 12 13:42:07 crc kubenswrapper[4125]: I0312 13:42:07.850366 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.004525 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-catalog-content\") pod \"6db26b71-4e04-4688-a0c0-00e06e8c888d\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.004609 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-utilities\") pod \"6db26b71-4e04-4688-a0c0-00e06e8c888d\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.004687 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzb4s\" (UniqueName: \"kubernetes.io/projected/6db26b71-4e04-4688-a0c0-00e06e8c888d-kube-api-access-nzb4s\") pod \"6db26b71-4e04-4688-a0c0-00e06e8c888d\" (UID: \"6db26b71-4e04-4688-a0c0-00e06e8c888d\") " Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.005340 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-utilities" (OuterVolumeSpecName: "utilities") pod "6db26b71-4e04-4688-a0c0-00e06e8c888d" (UID: "6db26b71-4e04-4688-a0c0-00e06e8c888d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.014053 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6db26b71-4e04-4688-a0c0-00e06e8c888d-kube-api-access-nzb4s" (OuterVolumeSpecName: "kube-api-access-nzb4s") pod "6db26b71-4e04-4688-a0c0-00e06e8c888d" (UID: "6db26b71-4e04-4688-a0c0-00e06e8c888d"). InnerVolumeSpecName "kube-api-access-nzb4s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.105678 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nzb4s\" (UniqueName: \"kubernetes.io/projected/6db26b71-4e04-4688-a0c0-00e06e8c888d-kube-api-access-nzb4s\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.105731 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.697101 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerStarted","Data":"7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7"} Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.705206 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dcqzh" event={"ID":"6db26b71-4e04-4688-a0c0-00e06e8c888d","Type":"ContainerDied","Data":"696d1fd3341d44c6b87bdc93d6ff15956037aa2458e9b94b4b68632fe8229857"} Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.705302 4125 scope.go:117] "RemoveContainer" containerID="688690825c46f663e5a8bb2bb39ceab651e21decaf94ca342ebe2c0f14ee4f5c" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.705429 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dcqzh" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.761566 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z4l9z" podStartSLOduration=3.125859368 podStartE2EDuration="21.76151375s" podCreationTimestamp="2026-03-12 13:41:47 +0000 UTC" firstStartedPulling="2026-03-12 13:41:49.392619488 +0000 UTC m=+1279.716005367" lastFinishedPulling="2026-03-12 13:42:08.02827386 +0000 UTC m=+1298.351659749" observedRunningTime="2026-03-12 13:42:08.748489534 +0000 UTC m=+1299.071875523" watchObservedRunningTime="2026-03-12 13:42:08.76151375 +0000 UTC m=+1299.084899649" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.799879 4125 scope.go:117] "RemoveContainer" containerID="0f6e5f54ac6d5074ad57349154715c6834d0d6a57a215eb365926d06bde34837" Mar 12 13:42:08 crc kubenswrapper[4125]: I0312 13:42:08.932253 4125 scope.go:117] "RemoveContainer" containerID="6b4d739a1f1a1f43a1f2f89445711eaf7d44a20124dedaea0758ded72747d1a0" Mar 12 13:42:09 crc kubenswrapper[4125]: I0312 13:42:09.009126 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6db26b71-4e04-4688-a0c0-00e06e8c888d" (UID: "6db26b71-4e04-4688-a0c0-00e06e8c888d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:09 crc kubenswrapper[4125]: I0312 13:42:09.026313 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6db26b71-4e04-4688-a0c0-00e06e8c888d-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:09 crc kubenswrapper[4125]: I0312 13:42:09.182782 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:42:09 crc kubenswrapper[4125]: I0312 13:42:09.184237 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:42:09 crc kubenswrapper[4125]: I0312 13:42:09.384051 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dcqzh"] Mar 12 13:42:09 crc kubenswrapper[4125]: I0312 13:42:09.391069 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dcqzh"] Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.054322 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" path="/var/lib/kubelet/pods/6db26b71-4e04-4688-a0c0-00e06e8c888d/volumes" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098450 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fcchf"] Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098582 4125 topology_manager.go:215] "Topology Admit Handler" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" podNamespace="openshift-marketplace" podName="redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: E0312 13:42:10.098732 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="extract-content" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098744 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="extract-content" Mar 12 13:42:10 crc kubenswrapper[4125]: E0312 13:42:10.098758 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e82cfcea-bcd5-4d25-9d17-4978f4452a3a" containerName="pruner" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098765 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82cfcea-bcd5-4d25-9d17-4978f4452a3a" containerName="pruner" Mar 12 13:42:10 crc kubenswrapper[4125]: E0312 13:42:10.098779 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098786 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" Mar 12 13:42:10 crc kubenswrapper[4125]: E0312 13:42:10.098804 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="extract-utilities" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098862 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="extract-utilities" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098986 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="6db26b71-4e04-4688-a0c0-00e06e8c888d" containerName="registry-server" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.098997 4125 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e82cfcea-bcd5-4d25-9d17-4978f4452a3a" containerName="pruner" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.100386 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.155988 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fcchf"] Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.258632 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-utilities\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.258725 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6p89g\" (UniqueName: \"kubernetes.io/projected/e4fa35cf-f315-4d16-bb86-47e737276e27-kube-api-access-6p89g\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.258852 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-catalog-content\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.360395 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-catalog-content\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.360519 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-utilities\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.360558 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6p89g\" (UniqueName: \"kubernetes.io/projected/e4fa35cf-f315-4d16-bb86-47e737276e27-kube-api-access-6p89g\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.361647 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-catalog-content\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.362074 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-utilities\") pod \"redhat-operators-fcchf\" (UID: 
\"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.406557 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6p89g\" (UniqueName: \"kubernetes.io/projected/e4fa35cf-f315-4d16-bb86-47e737276e27-kube-api-access-6p89g\") pod \"redhat-operators-fcchf\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") " pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.420593 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fcchf" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.479577 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-l4l79" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="registry-server" probeResult="failure" output=< Mar 12 13:42:10 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:42:10 crc kubenswrapper[4125]: > Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.515926 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/console-84fccc7b6-mkncc" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" containerID="cri-o://4084a97de098c0bfdf0c87f17eba0ba708025fd1287e417cf06bd377c11722da" gracePeriod=15 Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.746556 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84fccc7b6-mkncc_b233d916-bfe3-4ae5-ae39-6b574d1aa05e/console/1.log" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.748148 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84fccc7b6-mkncc_b233d916-bfe3-4ae5-ae39-6b574d1aa05e/console/0.log" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.748255 4125 generic.go:334] "Generic (PLEG): container finished" podID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerID="4084a97de098c0bfdf0c87f17eba0ba708025fd1287e417cf06bd377c11722da" exitCode=2 Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.748292 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84fccc7b6-mkncc" event={"ID":"b233d916-bfe3-4ae5-ae39-6b574d1aa05e","Type":"ContainerDied","Data":"4084a97de098c0bfdf0c87f17eba0ba708025fd1287e417cf06bd377c11722da"} Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.748330 4125 scope.go:117] "RemoveContainer" containerID="9c5d3925adb66fb4aa984ebb038669e1253dd1ae4d86122b5b7a97e70cd77667" Mar 12 13:42:10 crc kubenswrapper[4125]: I0312 13:42:10.837442 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fcchf"] Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.133130 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84fccc7b6-mkncc_b233d916-bfe3-4ae5-ae39-6b574d1aa05e/console/1.log" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.134362 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.190975 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.191354 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.191694 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.191740 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.191775 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.191858 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.191922 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") pod \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\" (UID: \"b233d916-bfe3-4ae5-ae39-6b574d1aa05e\") " Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.194532 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.195191 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.195979 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca" (OuterVolumeSpecName: "service-ca") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.196399 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config" (OuterVolumeSpecName: "console-config") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.205998 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.209041 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh" (OuterVolumeSpecName: "kube-api-access-lz9qh") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "kube-api-access-lz9qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.211483 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "b233d916-bfe3-4ae5-ae39-6b574d1aa05e" (UID: "b233d916-bfe3-4ae5-ae39-6b574d1aa05e"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.293554 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-lz9qh\" (UniqueName: \"kubernetes.io/projected/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-kube-api-access-lz9qh\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.293992 4125 reconciler_common.go:300] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.294311 4125 reconciler_common.go:300] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.294614 4125 reconciler_common.go:300] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-oauth-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.294768 4125 reconciler_common.go:300] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-service-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.294944 4125 reconciler_common.go:300] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-console-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.295143 4125 reconciler_common.go:300] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b233d916-bfe3-4ae5-ae39-6b574d1aa05e-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.757359 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84fccc7b6-mkncc_b233d916-bfe3-4ae5-ae39-6b574d1aa05e/console/1.log" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.757509 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84fccc7b6-mkncc" event={"ID":"b233d916-bfe3-4ae5-ae39-6b574d1aa05e","Type":"ContainerDied","Data":"116f902f7ec1a29ab141a90c55ec8e2238c0df7e80947d26572239e3bf033ed1"} Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.757547 4125 scope.go:117] "RemoveContainer" containerID="4084a97de098c0bfdf0c87f17eba0ba708025fd1287e417cf06bd377c11722da" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.757545 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84fccc7b6-mkncc" Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.761063 4125 generic.go:334] "Generic (PLEG): container finished" podID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerID="6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b" exitCode=0 Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.761130 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerDied","Data":"6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b"} Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.761159 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerStarted","Data":"c05d41f34b9c4b6e7b9622621d7bb72bb8e04c0164b91346ae58713346424a2f"} Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.895734 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-console/console-84fccc7b6-mkncc"] Mar 12 13:42:11 crc kubenswrapper[4125]: I0312 13:42:11.904894 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-84fccc7b6-mkncc"] Mar 12 13:42:12 crc kubenswrapper[4125]: I0312 13:42:12.032390 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" path="/var/lib/kubelet/pods/b233d916-bfe3-4ae5-ae39-6b574d1aa05e/volumes" Mar 12 13:42:13 crc kubenswrapper[4125]: I0312 13:42:13.781710 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerStarted","Data":"2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af"} Mar 12 13:42:17 crc kubenswrapper[4125]: I0312 13:42:17.596376 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:42:17 crc kubenswrapper[4125]: I0312 13:42:17.596506 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:42:17 crc kubenswrapper[4125]: I0312 13:42:17.807802 4125 generic.go:334] "Generic (PLEG): container finished" podID="813f238a-6e8e-480c-b510-22c9c49689e3" containerID="706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5" exitCode=0 Mar 12 13:42:17 crc kubenswrapper[4125]: I0312 13:42:17.807896 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerDied","Data":"706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5"} Mar 12 13:42:17 crc kubenswrapper[4125]: I0312 13:42:17.837474 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:42:17 crc kubenswrapper[4125]: I0312 13:42:17.949466 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.337859 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z4l9z"] Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.359616 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.459390 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.828303 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerStarted","Data":"e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993"} Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.829117 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z4l9z" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="registry-server" containerID="cri-o://7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7" gracePeriod=2 Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.889539 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rbnzk" podStartSLOduration=3.806436607 podStartE2EDuration="30.889495664s" podCreationTimestamp="2026-03-12 13:41:49 +0000 UTC" firstStartedPulling="2026-03-12 13:41:51.409548019 +0000 UTC m=+1281.732933778" lastFinishedPulling="2026-03-12 13:42:18.492606986 +0000 UTC m=+1308.815992835" observedRunningTime="2026-03-12 13:42:19.884409184 +0000 UTC m=+1310.207795103" watchObservedRunningTime="2026-03-12 13:42:19.889495664 +0000 UTC m=+1310.212881613" Mar 12 13:42:19 crc kubenswrapper[4125]: I0312 13:42:19.915670 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4l79"] Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.744111 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.833615 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xhxv\" (UniqueName: \"kubernetes.io/projected/2a9e1488-e852-4113-90a0-4177e0e57ee0-kube-api-access-8xhxv\") pod \"2a9e1488-e852-4113-90a0-4177e0e57ee0\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.834136 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-utilities\") pod \"2a9e1488-e852-4113-90a0-4177e0e57ee0\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.834386 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-catalog-content\") pod \"2a9e1488-e852-4113-90a0-4177e0e57ee0\" (UID: \"2a9e1488-e852-4113-90a0-4177e0e57ee0\") " Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.841112 4125 generic.go:334] "Generic (PLEG): container finished" podID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerID="7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7" exitCode=0 Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.841470 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l4l79" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="registry-server" containerID="cri-o://0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563" gracePeriod=2 Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.841880 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4l9z" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.841961 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerDied","Data":"7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7"} Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.842007 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4l9z" event={"ID":"2a9e1488-e852-4113-90a0-4177e0e57ee0","Type":"ContainerDied","Data":"d911511ce5ba6cd77949e2d7666d91f7e960862ef86ff3fe038ebf7ce364fe7f"} Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.842056 4125 scope.go:117] "RemoveContainer" containerID="7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.850027 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-utilities" (OuterVolumeSpecName: "utilities") pod "2a9e1488-e852-4113-90a0-4177e0e57ee0" (UID: "2a9e1488-e852-4113-90a0-4177e0e57ee0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.873864 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a9e1488-e852-4113-90a0-4177e0e57ee0-kube-api-access-8xhxv" (OuterVolumeSpecName: "kube-api-access-8xhxv") pod "2a9e1488-e852-4113-90a0-4177e0e57ee0" (UID: "2a9e1488-e852-4113-90a0-4177e0e57ee0"). InnerVolumeSpecName "kube-api-access-8xhxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.893016 4125 scope.go:117] "RemoveContainer" containerID="4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.946494 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-8xhxv\" (UniqueName: \"kubernetes.io/projected/2a9e1488-e852-4113-90a0-4177e0e57ee0-kube-api-access-8xhxv\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:20 crc kubenswrapper[4125]: I0312 13:42:20.946551 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.013029 4125 scope.go:117] "RemoveContainer" containerID="ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.075287 4125 scope.go:117] "RemoveContainer" containerID="7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7" Mar 12 13:42:21 crc kubenswrapper[4125]: E0312 13:42:21.080158 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7\": container with ID starting with 7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7 not found: ID does not exist" containerID="7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.080253 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7"} err="failed to get container status \"7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7\": rpc error: code = NotFound desc = could not find container \"7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7\": container with ID starting with 7f5762f1b37e94f5e57e95222f85e3bc1615fd47536a2f5a93b79756561e5bc7 not found: ID does not exist" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.080280 4125 scope.go:117] "RemoveContainer" containerID="4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87" Mar 12 13:42:21 crc kubenswrapper[4125]: E0312 13:42:21.081024 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87\": container with ID starting with 4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87 not found: ID does not exist" containerID="4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.081090 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87"} 
err="failed to get container status \"4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87\": rpc error: code = NotFound desc = could not find container \"4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87\": container with ID starting with 4f2c242a3b61e02fb414cae9adec86927f7ec2218c1a39788cfa520b77c23a87 not found: ID does not exist" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.081112 4125 scope.go:117] "RemoveContainer" containerID="ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c" Mar 12 13:42:21 crc kubenswrapper[4125]: E0312 13:42:21.083347 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c\": container with ID starting with ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c not found: ID does not exist" containerID="ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.083379 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c"} err="failed to get container status \"ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c\": rpc error: code = NotFound desc = could not find container \"ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c\": container with ID starting with ca5b42b3f729a5b954edbd126fa18c61af2122daf26f443a916cd08facea357c not found: ID does not exist" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.251444 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a9e1488-e852-4113-90a0-4177e0e57ee0" (UID: "2a9e1488-e852-4113-90a0-4177e0e57ee0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.252210 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a9e1488-e852-4113-90a0-4177e0e57ee0-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.378353 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.463603 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdsws\" (UniqueName: \"kubernetes.io/projected/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-kube-api-access-gdsws\") pod \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.463735 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-utilities\") pod \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.463773 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-catalog-content\") pod \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\" (UID: \"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68\") " Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.465508 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-utilities" (OuterVolumeSpecName: "utilities") pod "f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" (UID: "f6eecbb4-24d7-4b6c-af1f-6fddc1456a68"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.472487 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-kube-api-access-gdsws" (OuterVolumeSpecName: "kube-api-access-gdsws") pod "f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" (UID: "f6eecbb4-24d7-4b6c-af1f-6fddc1456a68"). InnerVolumeSpecName "kube-api-access-gdsws". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.510552 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z4l9z"] Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.524453 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z4l9z"] Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.565956 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-gdsws\" (UniqueName: \"kubernetes.io/projected/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-kube-api-access-gdsws\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.566300 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.640504 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" (UID: "f6eecbb4-24d7-4b6c-af1f-6fddc1456a68"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.668385 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.891468 4125 generic.go:334] "Generic (PLEG): container finished" podID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerID="0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563" exitCode=0 Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.891532 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4l79" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.891545 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerDied","Data":"0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563"} Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.891584 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4l79" event={"ID":"f6eecbb4-24d7-4b6c-af1f-6fddc1456a68","Type":"ContainerDied","Data":"f10fd7717ae846ecd32352468ba2b98d97229d9c5473b1112ebcef63fb6e9849"} Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.891621 4125 scope.go:117] "RemoveContainer" containerID="0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563" Mar 12 13:42:21 crc kubenswrapper[4125]: I0312 13:42:21.942315 4125 scope.go:117] "RemoveContainer" containerID="ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.003638 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4l79"] Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.013256 4125 scope.go:117] "RemoveContainer" containerID="11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.021008 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4l79"] Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.042136 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" path="/var/lib/kubelet/pods/2a9e1488-e852-4113-90a0-4177e0e57ee0/volumes" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.044654 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" path="/var/lib/kubelet/pods/f6eecbb4-24d7-4b6c-af1f-6fddc1456a68/volumes" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.050805 4125 scope.go:117] "RemoveContainer" containerID="0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563" Mar 12 13:42:22 crc kubenswrapper[4125]: E0312 13:42:22.051606 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563\": container with ID starting with 0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563 not found: ID does not exist" containerID="0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.051653 4125 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563"} err="failed to get container status \"0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563\": rpc error: code = NotFound desc = could not find container \"0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563\": container with ID starting with 0a3dea68f62261ae962a5bcac1fa6835ea0c12fa5ae038a858b6ffe54f586563 not found: ID does not exist" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.051663 4125 scope.go:117] "RemoveContainer" containerID="ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb" Mar 12 13:42:22 crc kubenswrapper[4125]: E0312 13:42:22.052964 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb\": container with ID starting with ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb not found: ID does not exist" containerID="ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.052992 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb"} err="failed to get container status \"ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb\": rpc error: code = NotFound desc = could not find container \"ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb\": container with ID starting with ecd3c44f268a7eb4871b1b72da0b4d1d7b10b0cf2f9cb6a3ab586a0a85e91cbb not found: ID does not exist" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.053001 4125 scope.go:117] "RemoveContainer" containerID="11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776" Mar 12 13:42:22 crc kubenswrapper[4125]: E0312 13:42:22.053374 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776\": container with ID starting with 11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776 not found: ID does not exist" containerID="11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776" Mar 12 13:42:22 crc kubenswrapper[4125]: I0312 13:42:22.053396 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776"} err="failed to get container status \"11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776\": rpc error: code = NotFound desc = could not find container \"11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776\": container with ID starting with 11248565b7c9d86ebf83959406d49b1cc4d26278b2dee9c504b2414c3fd08776 not found: ID does not exist" Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.132931 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/1.log" Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.144782 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/0.log" Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 
Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.145418 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerDied","Data":"9c3e595db3b1fe865bb35c919363f1aa729037b3064769f8a26dd8e09daa4bf4"}
Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.145457 4125 scope.go:117] "RemoveContainer" containerID="c47ce1b61b78f947bbf881c4500564865b677f7ac60916f2651215a08d905da4"
Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.146299 4125 scope.go:117] "RemoveContainer" containerID="9c3e595db3b1fe865bb35c919363f1aa729037b3064769f8a26dd8e09daa4bf4"
Mar 12 13:42:29 crc kubenswrapper[4125]: E0312 13:42:29.151335 4125 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ingress-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ingress-operator pod=ingress-operator-7d46d5bb6d-rrg6t_openshift-ingress-operator(7d51f445-054a-4e4f-a67b-a828f5a32511)\"" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.805641 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rbnzk"
Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.806355 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rbnzk"
Mar 12 13:42:29 crc kubenswrapper[4125]: I0312 13:42:29.969094 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rbnzk"
Mar 12 13:42:30 crc kubenswrapper[4125]: I0312 13:42:30.154878 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/1.log"
Mar 12 13:42:30 crc kubenswrapper[4125]: I0312 13:42:30.321713 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rbnzk"
Mar 12 13:42:30 crc kubenswrapper[4125]: I0312 13:42:30.400774 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rbnzk"]
Mar 12 13:42:31 crc kubenswrapper[4125]: I0312 13:42:31.440004 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:42:31 crc kubenswrapper[4125]: I0312 13:42:31.440307 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:42:31 crc kubenswrapper[4125]: I0312 13:42:31.441121 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:42:31 crc kubenswrapper[4125]: I0312 13:42:31.441217 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:42:31 crc kubenswrapper[4125]: I0312 13:42:31.441454 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:42:32 crc kubenswrapper[4125]: I0312 13:42:32.168650 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rbnzk" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="registry-server" containerID="cri-o://e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993" gracePeriod=2
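The CrashLoopBackOff entry for ingress-operator quotes "back-off 10s": the kubelet delays restarts of a crashing container with an exponential backoff that starts at 10 seconds, doubles on each consecutive failure, and caps at 5 minutes (the long-standing kubelet defaults; the helper below illustrates that schedule and is not kubelet code):

    package sketch

    import "time"

    // crashLoopDelay returns the restart delay after n consecutive failures:
    // 10s, 20s, 40s, ... capped at 5m. The first failure (n=1) waits 10s,
    // which is the "back-off 10s restarting failed container" message above.
    func crashLoopDelay(consecutiveFailures int) time.Duration {
        const (
            base     = 10 * time.Second
            maxDelay = 5 * time.Minute
        )
        d := base
        for i := 1; i < consecutiveFailures; i++ {
            d *= 2
            if d > maxDelay {
                return maxDelay
            }
        }
        return d
    }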
Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.063641 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbnzk"
Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.140625 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-catalog-content\") pod \"813f238a-6e8e-480c-b510-22c9c49689e3\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") "
Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.140689 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xnb2\" (UniqueName: \"kubernetes.io/projected/813f238a-6e8e-480c-b510-22c9c49689e3-kube-api-access-2xnb2\") pod \"813f238a-6e8e-480c-b510-22c9c49689e3\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") "
Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.140956 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-utilities\") pod \"813f238a-6e8e-480c-b510-22c9c49689e3\" (UID: \"813f238a-6e8e-480c-b510-22c9c49689e3\") "
Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.143452 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-utilities" (OuterVolumeSpecName: "utilities") pod "813f238a-6e8e-480c-b510-22c9c49689e3" (UID: "813f238a-6e8e-480c-b510-22c9c49689e3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.161898 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/813f238a-6e8e-480c-b510-22c9c49689e3-kube-api-access-2xnb2" (OuterVolumeSpecName: "kube-api-access-2xnb2") pod "813f238a-6e8e-480c-b510-22c9c49689e3" (UID: "813f238a-6e8e-480c-b510-22c9c49689e3"). InnerVolumeSpecName "kube-api-access-2xnb2". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.193307 4125 generic.go:334] "Generic (PLEG): container finished" podID="813f238a-6e8e-480c-b510-22c9c49689e3" containerID="e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993" exitCode=0 Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.193364 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerDied","Data":"e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993"} Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.193402 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbnzk" event={"ID":"813f238a-6e8e-480c-b510-22c9c49689e3","Type":"ContainerDied","Data":"a65f7702d7a67f8a62508d28d163fbf07fccf5e6321514bfa99ccffa75a13e8c"} Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.193456 4125 scope.go:117] "RemoveContainer" containerID="e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.193670 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbnzk" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.245000 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2xnb2\" (UniqueName: \"kubernetes.io/projected/813f238a-6e8e-480c-b510-22c9c49689e3-kube-api-access-2xnb2\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.245097 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.315968 4125 scope.go:117] "RemoveContainer" containerID="706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.527302 4125 scope.go:117] "RemoveContainer" containerID="229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.648757 4125 scope.go:117] "RemoveContainer" containerID="e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993" Mar 12 13:42:33 crc kubenswrapper[4125]: E0312 13:42:33.652049 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993\": container with ID starting with e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993 not found: ID does not exist" containerID="e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.652321 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993"} err="failed to get container status \"e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993\": rpc error: code = NotFound desc = could not find container \"e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993\": container with ID starting with e356c00410fbc6e64b3a626de2307d4eb980e1c6996ff3c4928e9fa63489e993 not found: ID does not exist" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.652339 4125 
scope.go:117] "RemoveContainer" containerID="706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5" Mar 12 13:42:33 crc kubenswrapper[4125]: E0312 13:42:33.658803 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5\": container with ID starting with 706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5 not found: ID does not exist" containerID="706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.658943 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5"} err="failed to get container status \"706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5\": rpc error: code = NotFound desc = could not find container \"706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5\": container with ID starting with 706f4457d1c2bfb4634b335612a58a3f820ba3ad9b29b444a0dfab371ded0da5 not found: ID does not exist" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.658963 4125 scope.go:117] "RemoveContainer" containerID="229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b" Mar 12 13:42:33 crc kubenswrapper[4125]: E0312 13:42:33.661078 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b\": container with ID starting with 229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b not found: ID does not exist" containerID="229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b" Mar 12 13:42:33 crc kubenswrapper[4125]: I0312 13:42:33.661141 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b"} err="failed to get container status \"229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b\": rpc error: code = NotFound desc = could not find container \"229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b\": container with ID starting with 229d1c36b6ddf9a849a8d8755a83a31d09bceed1bbea38e4f09d66ebfdb94a5b not found: ID does not exist" Mar 12 13:42:34 crc kubenswrapper[4125]: I0312 13:42:34.028038 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "813f238a-6e8e-480c-b510-22c9c49689e3" (UID: "813f238a-6e8e-480c-b510-22c9c49689e3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:42:34 crc kubenswrapper[4125]: I0312 13:42:34.067263 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/813f238a-6e8e-480c-b510-22c9c49689e3-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:34 crc kubenswrapper[4125]: I0312 13:42:34.290239 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rbnzk"] Mar 12 13:42:34 crc kubenswrapper[4125]: I0312 13:42:34.303631 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rbnzk"] Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.484985 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.485508 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager" containerID="cri-o://41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043" gracePeriod=30 Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.485531 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller" containerID="cri-o://7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" gracePeriod=30 Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.485566 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-recovery-controller" containerID="cri-o://45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba" gracePeriod=30 Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.485678 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-cert-syncer" containerID="cri-o://b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262" gracePeriod=30 Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.499163 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.499727 4125 topology_manager.go:215] "Topology Admit Handler" podUID="4faaac70bf21c7d77dcb526af466bffa" podNamespace="openshift-kube-controller-manager" podName="kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.500389 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="extract-utilities" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.500691 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="extract-utilities" Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.500934 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="extract-content" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.501199 
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.501448 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.501678 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.501984 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-recovery-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.502210 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-recovery-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.502460 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="extract-utilities"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.502726 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="extract-utilities"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.503084 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.503379 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.503638 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.503930 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504104 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="extract-content"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504478 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="extract-content"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504511 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504523 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504542 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="extract-content"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504553 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="extract-content"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504565 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="extract-utilities"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504576 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="extract-utilities"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504595 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-cert-syncer"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504678 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-cert-syncer"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504697 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504708 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504721 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504731 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504748 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504758 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: E0312 13:42:35.504776 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.504789 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505072 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505097 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505114 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="cluster-policy-controller"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505127 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505138 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505154 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6eecbb4-24d7-4b6c-af1f-6fddc1456a68" containerName="registry-server"
Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505167 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="registry-server"
"RemoveStaleState removing state" podUID="2a9e1488-e852-4113-90a0-4177e0e57ee0" containerName="registry-server" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505180 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-recovery-controller" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.505199 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="94e8a39ea660d88d01c6db5ba5e6d884" containerName="kube-controller-manager-cert-syncer" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.515663 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="b233d916-bfe3-4ae5-ae39-6b574d1aa05e" containerName="console" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.591774 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.592139 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.695201 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.695286 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.695420 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.695435 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.783630 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/kube-controller-manager-cert-syncer/0.log" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.785142 4125 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.785804 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.793608 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-controller-manager/kube-controller-manager-crc" oldPodUID="94e8a39ea660d88d01c6db5ba5e6d884" podUID="4faaac70bf21c7d77dcb526af466bffa" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.897663 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-resource-dir\") pod \"94e8a39ea660d88d01c6db5ba5e6d884\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.897937 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-cert-dir\") pod \"94e8a39ea660d88d01c6db5ba5e6d884\" (UID: \"94e8a39ea660d88d01c6db5ba5e6d884\") " Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.897929 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "94e8a39ea660d88d01c6db5ba5e6d884" (UID: "94e8a39ea660d88d01c6db5ba5e6d884"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.898071 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "94e8a39ea660d88d01c6db5ba5e6d884" (UID: "94e8a39ea660d88d01c6db5ba5e6d884"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.898388 4125 reconciler_common.go:300] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-cert-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:35 crc kubenswrapper[4125]: I0312 13:42:35.898430 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/94e8a39ea660d88d01c6db5ba5e6d884-resource-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.034453 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="813f238a-6e8e-480c-b510-22c9c49689e3" path="/var/lib/kubelet/pods/813f238a-6e8e-480c-b510-22c9c49689e3/volumes" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.036254 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94e8a39ea660d88d01c6db5ba5e6d884" path="/var/lib/kubelet/pods/94e8a39ea660d88d01c6db5ba5e6d884/volumes" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.217742 4125 generic.go:334] "Generic (PLEG): container finished" podID="334fb4b6-16b9-453a-9208-846feab2a2fa" containerID="cc53439518c5ce30d5f21f182449aa7d6203ce6faea070900253a04d3e70a104" exitCode=0 Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.217880 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-12-crc" event={"ID":"334fb4b6-16b9-453a-9208-846feab2a2fa","Type":"ContainerDied","Data":"cc53439518c5ce30d5f21f182449aa7d6203ce6faea070900253a04d3e70a104"} Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.222022 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/kube-controller-manager-cert-syncer/0.log" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.222664 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_94e8a39ea660d88d01c6db5ba5e6d884/cluster-policy-controller/0.log" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.223406 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.223489 4125 scope.go:117] "RemoveContainer" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.223397 4125 generic.go:334] "Generic (PLEG): container finished" podID="94e8a39ea660d88d01c6db5ba5e6d884" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" exitCode=0 Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.223705 4125 generic.go:334] "Generic (PLEG): container finished" podID="94e8a39ea660d88d01c6db5ba5e6d884" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba" exitCode=0 Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.223805 4125 generic.go:334] "Generic (PLEG): container finished" podID="94e8a39ea660d88d01c6db5ba5e6d884" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262" exitCode=2 Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.223923 4125 generic.go:334] "Generic (PLEG): container finished" podID="94e8a39ea660d88d01c6db5ba5e6d884" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043" exitCode=0 Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.261059 4125 scope.go:117] "RemoveContainer" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.270914 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-controller-manager/kube-controller-manager-crc" oldPodUID="94e8a39ea660d88d01c6db5ba5e6d884" podUID="4faaac70bf21c7d77dcb526af466bffa" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.297457 4125 scope.go:117] "RemoveContainer" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.337283 4125 scope.go:117] "RemoveContainer" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.376669 4125 scope.go:117] "RemoveContainer" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.417052 4125 scope.go:117] "RemoveContainer" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" Mar 12 13:42:36 crc kubenswrapper[4125]: E0312 13:42:36.418084 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": container with ID starting with 7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca not found: ID does not exist" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.418136 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca"} err="failed to get container status \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": rpc error: code = NotFound desc = could not find container \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": container with ID starting with 7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca not found: ID does not exist" 
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.418148 4125 scope.go:117] "RemoveContainer" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"
Mar 12 13:42:36 crc kubenswrapper[4125]: E0312 13:42:36.418726 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": container with ID starting with 45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba not found: ID does not exist" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.418769 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"} err="failed to get container status \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": rpc error: code = NotFound desc = could not find container \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": container with ID starting with 45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba not found: ID does not exist"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.418782 4125 scope.go:117] "RemoveContainer" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"
Mar 12 13:42:36 crc kubenswrapper[4125]: E0312 13:42:36.419937 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": container with ID starting with b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262 not found: ID does not exist" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.419970 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"} err="failed to get container status \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": rpc error: code = NotFound desc = could not find container \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": container with ID starting with b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262 not found: ID does not exist"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.419980 4125 scope.go:117] "RemoveContainer" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"
Mar 12 13:42:36 crc kubenswrapper[4125]: E0312 13:42:36.420383 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": container with ID starting with 45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd not found: ID does not exist" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.420406 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"} err="failed to get container status \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": rpc error: code = NotFound desc = could not find container \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": container with ID starting with 45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd not found: ID does not exist"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.420416 4125 scope.go:117] "RemoveContainer" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"
Mar 12 13:42:36 crc kubenswrapper[4125]: E0312 13:42:36.421017 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": container with ID starting with 41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043 not found: ID does not exist" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.421077 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"} err="failed to get container status \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": rpc error: code = NotFound desc = could not find container \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": container with ID starting with 41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043 not found: ID does not exist"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.421100 4125 scope.go:117] "RemoveContainer" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.421663 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca"} err="failed to get container status \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": rpc error: code = NotFound desc = could not find container \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": container with ID starting with 7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca not found: ID does not exist"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.421684 4125 scope.go:117] "RemoveContainer" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.422146 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"} err="failed to get container status \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": rpc error: code = NotFound desc = could not find container \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": container with ID starting with 45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba not found: ID does not exist"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.422162 4125 scope.go:117] "RemoveContainer" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"
Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.422542 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"} err="failed to get container status \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": rpc error: code = NotFound desc = could not find
container \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": container with ID starting with b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262 not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.422562 4125 scope.go:117] "RemoveContainer" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.424719 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"} err="failed to get container status \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": rpc error: code = NotFound desc = could not find container \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": container with ID starting with 45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.424735 4125 scope.go:117] "RemoveContainer" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.425340 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"} err="failed to get container status \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": rpc error: code = NotFound desc = could not find container \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": container with ID starting with 41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043 not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.425374 4125 scope.go:117] "RemoveContainer" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.425784 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca"} err="failed to get container status \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": rpc error: code = NotFound desc = could not find container \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": container with ID starting with 7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.425871 4125 scope.go:117] "RemoveContainer" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.426347 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"} err="failed to get container status \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": rpc error: code = NotFound desc = could not find container \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": container with ID starting with 45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.426384 4125 scope.go:117] "RemoveContainer" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.426804 4125 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"} err="failed to get container status \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": rpc error: code = NotFound desc = could not find container \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": container with ID starting with b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262 not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.426950 4125 scope.go:117] "RemoveContainer" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.427511 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"} err="failed to get container status \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": rpc error: code = NotFound desc = could not find container \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": container with ID starting with 45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.427552 4125 scope.go:117] "RemoveContainer" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.429200 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"} err="failed to get container status \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": rpc error: code = NotFound desc = could not find container \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": container with ID starting with 41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043 not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.429259 4125 scope.go:117] "RemoveContainer" containerID="7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.429875 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca"} err="failed to get container status \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": rpc error: code = NotFound desc = could not find container \"7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca\": container with ID starting with 7d102a2a5365a985e99f289ec5423b41019ba1346168d9adec1bc3d2e6b60bca not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.429911 4125 scope.go:117] "RemoveContainer" containerID="45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.430326 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba"} err="failed to get container status \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": rpc error: code = NotFound desc = could not find container \"45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba\": container with ID starting with 
45955b299ce497f02b4c1558bd16eaff1596e0e222cf69ea7130dacfb2dd73ba not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.430358 4125 scope.go:117] "RemoveContainer" containerID="b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.430975 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262"} err="failed to get container status \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": rpc error: code = NotFound desc = could not find container \"b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262\": container with ID starting with b55d724aa16413ab524ce19c5da7a495fc25070074f492618bb38859badf4262 not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.431008 4125 scope.go:117] "RemoveContainer" containerID="45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.431455 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd"} err="failed to get container status \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": rpc error: code = NotFound desc = could not find container \"45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd\": container with ID starting with 45d8d796271c737537cb4e5a1661200445f23a8fc4d529372e47ea8c04cf2acd not found: ID does not exist" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.431488 4125 scope.go:117] "RemoveContainer" containerID="41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043" Mar 12 13:42:36 crc kubenswrapper[4125]: I0312 13:42:36.431901 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043"} err="failed to get container status \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": rpc error: code = NotFound desc = could not find container \"41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043\": container with ID starting with 41078c5e30ac0653dace9115bd3c7ada249ae361cbda745d7a995bf8ee785043 not found: ID does not exist" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.561641 4125 util.go:48] "No ready sandbox for pod can be found. 
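The run of NotFound errors above is the benign tail of the kube-controller-manager static-pod turnover: the kubelet keeps issuing RemoveContainer for containers CRI-O has already deleted, the runtime answers NotFound, and the kubelet records the error and moves on. A minimal Go sketch of that tolerance, using a stand-in runtime rather than the kubelet's real CRI client (fakeRuntime and removeIfAbsentOK are hypothetical names, not kubelet source):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// fakeRuntime stands in for the CRI runtime; it always answers NotFound,
// which is what CRI-O returns above once a container is already gone.
type fakeRuntime struct{}

func (fakeRuntime) ContainerStatus(id string) error {
	return status.Error(codes.NotFound, "could not find container "+id)
}

func (fakeRuntime) RemoveContainer(id string) error { return nil }

// removeIfAbsentOK treats NotFound as success, mirroring how the log
// records "DeleteContainer returned error" at info level and continues.
func removeIfAbsentOK(rt fakeRuntime, id string) error {
	if err := rt.ContainerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // already removed; nothing left to do
		}
		return err
	}
	return rt.RemoveContainer(id)
}

func main() {
	fmt.Println(removeIfAbsentOK(fakeRuntime{}, "7d102a2a5365"))
}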
Need to start a new one" pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.643006 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-kubelet-dir\") pod \"334fb4b6-16b9-453a-9208-846feab2a2fa\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.643208 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-var-lock\") pod \"334fb4b6-16b9-453a-9208-846feab2a2fa\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.643309 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334fb4b6-16b9-453a-9208-846feab2a2fa-kube-api-access\") pod \"334fb4b6-16b9-453a-9208-846feab2a2fa\" (UID: \"334fb4b6-16b9-453a-9208-846feab2a2fa\") " Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.644712 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "334fb4b6-16b9-453a-9208-846feab2a2fa" (UID: "334fb4b6-16b9-453a-9208-846feab2a2fa"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.644788 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-var-lock" (OuterVolumeSpecName: "var-lock") pod "334fb4b6-16b9-453a-9208-846feab2a2fa" (UID: "334fb4b6-16b9-453a-9208-846feab2a2fa"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.666133 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/334fb4b6-16b9-453a-9208-846feab2a2fa-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "334fb4b6-16b9-453a-9208-846feab2a2fa" (UID: "334fb4b6-16b9-453a-9208-846feab2a2fa"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.756992 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-var-lock\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.757626 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/334fb4b6-16b9-453a-9208-846feab2a2fa-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:37 crc kubenswrapper[4125]: I0312 13:42:37.757648 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/334fb4b6-16b9-453a-9208-846feab2a2fa-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:42:38 crc kubenswrapper[4125]: I0312 13:42:38.240930 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/installer-12-crc" event={"ID":"334fb4b6-16b9-453a-9208-846feab2a2fa","Type":"ContainerDied","Data":"2daa2e881606d6ecba509a68be989bcaffc1982acd45dce1d3626632bc85e420"} Mar 12 13:42:38 crc kubenswrapper[4125]: I0312 13:42:38.240974 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-12-crc" Mar 12 13:42:38 crc kubenswrapper[4125]: I0312 13:42:38.240990 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2daa2e881606d6ecba509a68be989bcaffc1982acd45dce1d3626632bc85e420" Mar 12 13:42:43 crc kubenswrapper[4125]: I0312 13:42:43.026118 4125 scope.go:117] "RemoveContainer" containerID="9c3e595db3b1fe865bb35c919363f1aa729037b3064769f8a26dd8e09daa4bf4" Mar 12 13:42:44 crc kubenswrapper[4125]: I0312 13:42:44.288067 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/1.log" Mar 12 13:42:44 crc kubenswrapper[4125]: I0312 13:42:44.290082 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"6429e66ee648fe704b81785f4e302c9d298cd734b6e2bb050bc4c56bc61e890b"} Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.035900 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.053162 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="fa8cc3eb-c6e0-4bee-be7e-aeac41cfac05" Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.053974 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="fa8cc3eb-c6e0-4bee-be7e-aeac41cfac05" Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.094218 4125 kubelet.go:1922] "Deleted mirror pod because it is outdated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.094613 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.099438 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.155549 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.158805 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Mar 12 13:42:49 crc kubenswrapper[4125]: I0312 13:42:49.323693 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"dafdcab150cbb0db7b413fe7397257baf34496c2690817298445f30eec4cda36"} Mar 12 13:42:50 crc kubenswrapper[4125]: I0312 13:42:50.335952 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"c734d5e68fbd03360addb91e2bf98a73c5db6649ede92da77f7b51a827336da0"} Mar 12 13:42:50 crc kubenswrapper[4125]: I0312 13:42:50.336021 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"402d30aed83909fcb1269d16d81ab5eb3088691295c2863c81ad931be8171c7b"} Mar 12 13:42:51 crc kubenswrapper[4125]: I0312 13:42:51.363125 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"6277384789a699b4018a3e0a39837ccb4dfea920453f3921130ebd69436112d3"} Mar 12 13:42:52 crc kubenswrapper[4125]: I0312 13:42:52.372051 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"84bf66f430c1adeaeda73a0bb0929f28b28aec327c612ea17fa9ad2400a73c15"} Mar 12 13:42:52 crc kubenswrapper[4125]: I0312 13:42:52.409403 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=3.409350283 podStartE2EDuration="3.409350283s" podCreationTimestamp="2026-03-12 13:42:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
Mar 12 13:42:54 crc kubenswrapper[4125]: I0312 13:42:54.458124 4125 generic.go:334] "Generic (PLEG): container finished" podID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerID="2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af" exitCode=0
Mar 12 13:42:54 crc kubenswrapper[4125]: I0312 13:42:54.458339 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerDied","Data":"2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af"}
Mar 12 13:42:56 crc kubenswrapper[4125]: I0312 13:42:56.472946 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerStarted","Data":"ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b"}
Mar 12 13:42:56 crc kubenswrapper[4125]: I0312 13:42:56.506713 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fcchf" podStartSLOduration=3.290223483 podStartE2EDuration="46.506663537s" podCreationTimestamp="2026-03-12 13:42:10 +0000 UTC" firstStartedPulling="2026-03-12 13:42:11.771892037 +0000 UTC m=+1302.095277886" lastFinishedPulling="2026-03-12 13:42:54.988332171 +0000 UTC m=+1345.311717940" observedRunningTime="2026-03-12 13:42:56.503962343 +0000 UTC m=+1346.827348312" watchObservedRunningTime="2026-03-12 13:42:56.506663537 +0000 UTC m=+1346.830049416"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.157394 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.157704 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.157723 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.157741 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.164540 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.165918 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:42:59 crc kubenswrapper[4125]: I0312 13:42:59.515459 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:43:00 crc kubenswrapper[4125]: I0312 13:43:00.421916 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fcchf"
Mar 12 13:43:00 crc kubenswrapper[4125]: I0312 13:43:00.422020 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fcchf"
Mar 12 13:43:00 crc kubenswrapper[4125]: I0312 13:43:00.518562 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 12 13:43:01 crc kubenswrapper[4125]: I0312 13:43:01.610935 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fcchf" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:43:01 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:43:01 crc kubenswrapper[4125]: >
Mar 12 13:43:11 crc kubenswrapper[4125]: I0312 13:43:11.561273 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fcchf" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:43:11 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:43:11 crc kubenswrapper[4125]: >
Mar 12 13:43:20 crc kubenswrapper[4125]: I0312 13:43:20.605636 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fcchf"
Mar 12 13:43:20 crc kubenswrapper[4125]: I0312 13:43:20.783745 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fcchf"
Mar 12 13:43:20 crc kubenswrapper[4125]: I0312 13:43:20.844499 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fcchf"]
Mar 12 13:43:21 crc kubenswrapper[4125]: I0312 13:43:21.672374 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fcchf" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="registry-server" containerID="cri-o://ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b" gracePeriod=2
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.277671 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fcchf"
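The two "Probe failed" entries above show the registry-server's startup probe timing out against gRPC port 50051 with a one-second budget, until the catalog finishes loading and the probe flips to "started". A rough Go equivalent of that check, assuming a plain TCP connect (the log does not record the exact probe command, so this is only an approximation of it):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Same target port and timeout as the failing startup probe above.
	conn, err := net.DialTimeout("tcp", "localhost:50051", time.Second)
	if err != nil {
		fmt.Println("probe failure:", err) // the condition prober.go reports
		return
	}
	conn.Close()
	fmt.Println("probe ok") // what the later status="started" entry reflects
}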
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.353105 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-catalog-content\") pod \"e4fa35cf-f315-4d16-bb86-47e737276e27\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") "
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.353205 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6p89g\" (UniqueName: \"kubernetes.io/projected/e4fa35cf-f315-4d16-bb86-47e737276e27-kube-api-access-6p89g\") pod \"e4fa35cf-f315-4d16-bb86-47e737276e27\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") "
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.353316 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-utilities\") pod \"e4fa35cf-f315-4d16-bb86-47e737276e27\" (UID: \"e4fa35cf-f315-4d16-bb86-47e737276e27\") "
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.354029 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-utilities" (OuterVolumeSpecName: "utilities") pod "e4fa35cf-f315-4d16-bb86-47e737276e27" (UID: "e4fa35cf-f315-4d16-bb86-47e737276e27"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.387351 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4fa35cf-f315-4d16-bb86-47e737276e27-kube-api-access-6p89g" (OuterVolumeSpecName: "kube-api-access-6p89g") pod "e4fa35cf-f315-4d16-bb86-47e737276e27" (UID: "e4fa35cf-f315-4d16-bb86-47e737276e27"). InnerVolumeSpecName "kube-api-access-6p89g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.454700 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-6p89g\" (UniqueName: \"kubernetes.io/projected/e4fa35cf-f315-4d16-bb86-47e737276e27-kube-api-access-6p89g\") on node \"crc\" DevicePath \"\""
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.454757 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.684873 4125 generic.go:334] "Generic (PLEG): container finished" podID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerID="ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b" exitCode=0
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.684940 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerDied","Data":"ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b"}
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.684971 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcchf" event={"ID":"e4fa35cf-f315-4d16-bb86-47e737276e27","Type":"ContainerDied","Data":"c05d41f34b9c4b6e7b9622621d7bb72bb8e04c0164b91346ae58713346424a2f"}
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.685011 4125 scope.go:117] "RemoveContainer" containerID="ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.685156 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fcchf"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.732280 4125 scope.go:117] "RemoveContainer" containerID="2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.863574 4125 scope.go:117] "RemoveContainer" containerID="6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.909458 4125 scope.go:117] "RemoveContainer" containerID="ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b"
Mar 12 13:43:22 crc kubenswrapper[4125]: E0312 13:43:22.912280 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b\": container with ID starting with ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b not found: ID does not exist" containerID="ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.912402 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b"} err="failed to get container status \"ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b\": rpc error: code = NotFound desc = could not find container \"ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b\": container with ID starting with ca764f13b3f5d418341df302cd4dd2487c25a3ddf4bdd12dfc67a15faeeaa50b not found: ID does not exist"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.912437 4125 scope.go:117] "RemoveContainer" containerID="2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af"
Mar 12 13:43:22 crc kubenswrapper[4125]: E0312 13:43:22.913954 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af\": container with ID starting with 2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af not found: ID does not exist" containerID="2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.914023 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af"} err="failed to get container status \"2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af\": rpc error: code = NotFound desc = could not find container \"2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af\": container with ID starting with 2e934735ff199df5a9ce6ab99d63c28b4950be27bf2f4236c9a66695386383af not found: ID does not exist"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.914044 4125 scope.go:117] "RemoveContainer" containerID="6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b"
Mar 12 13:43:22 crc kubenswrapper[4125]: E0312 13:43:22.914712 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b\": container with ID starting with 6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b not found: ID does not exist" containerID="6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b"
Mar 12 13:43:22 crc kubenswrapper[4125]: I0312 13:43:22.914770 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b"} err="failed to get container status \"6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b\": rpc error: code = NotFound desc = could not find container \"6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b\": container with ID starting with 6ef8abc296de3500b5aed68f7af98e5e1afb789eddef1ac0becdd860a8666c3b not found: ID does not exist"
Mar 12 13:43:23 crc kubenswrapper[4125]: I0312 13:43:23.388127 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e4fa35cf-f315-4d16-bb86-47e737276e27" (UID: "e4fa35cf-f315-4d16-bb86-47e737276e27"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:43:23 crc kubenswrapper[4125]: I0312 13:43:23.469576 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4fa35cf-f315-4d16-bb86-47e737276e27-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 13:43:23 crc kubenswrapper[4125]: I0312 13:43:23.671577 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fcchf"]
Mar 12 13:43:23 crc kubenswrapper[4125]: I0312 13:43:23.690526 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fcchf"]
Mar 12 13:43:24 crc kubenswrapper[4125]: I0312 13:43:24.034765 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" path="/var/lib/kubelet/pods/e4fa35cf-f315-4d16-bb86-47e737276e27/volumes"
Mar 12 13:43:31 crc kubenswrapper[4125]: I0312 13:43:31.442990 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:43:31 crc kubenswrapper[4125]: I0312 13:43:31.443360 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:43:31 crc kubenswrapper[4125]: I0312 13:43:31.443411 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:43:31 crc kubenswrapper[4125]: I0312 13:43:31.443451 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:43:31 crc kubenswrapper[4125]: I0312 13:43:31.443491 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:43:55 crc kubenswrapper[4125]: I0312 13:43:55.021512 4125 scope.go:117] "RemoveContainer" containerID="249a2a8171d8bdd35e3ae42b4c0fcd4f4755976f998a414d0940b501fafbf666"
Mar 12 13:44:31 crc kubenswrapper[4125]: I0312 13:44:31.444119 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:44:31 crc kubenswrapper[4125]: I0312 13:44:31.445110 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:44:31 crc kubenswrapper[4125]: I0312 13:44:31.445364 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:44:31 crc kubenswrapper[4125]: I0312 13:44:31.445434 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.819068 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-10-crc"] Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.820097 4125 topology_manager.go:215] "Topology Admit Handler" podUID="3395180d-e7e5-42db-8bc0-c275a4389dc2" podNamespace="openshift-kube-apiserver" podName="installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: E0312 13:44:59.820657 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="extract-utilities" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.820705 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="extract-utilities" Mar 12 13:44:59 crc kubenswrapper[4125]: E0312 13:44:59.820764 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="334fb4b6-16b9-453a-9208-846feab2a2fa" containerName="installer" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.820781 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="334fb4b6-16b9-453a-9208-846feab2a2fa" containerName="installer" Mar 12 13:44:59 crc kubenswrapper[4125]: E0312 13:44:59.820902 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="registry-server" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.820933 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="registry-server" Mar 12 13:44:59 crc kubenswrapper[4125]: E0312 13:44:59.820964 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="extract-content" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.820979 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="extract-content" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.821719 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="334fb4b6-16b9-453a-9208-846feab2a2fa" containerName="installer" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.821758 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4fa35cf-f315-4d16-bb86-47e737276e27" containerName="registry-server" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.828064 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.848063 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.848068 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-4kgh8" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.857660 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-var-lock\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.858125 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3395180d-e7e5-42db-8bc0-c275a4389dc2-kube-api-access\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.858188 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-kubelet-dir\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.918730 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-10-crc"] Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.968050 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3395180d-e7e5-42db-8bc0-c275a4389dc2-kube-api-access\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.968225 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-kubelet-dir\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.968322 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-var-lock\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.968445 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-var-lock\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:44:59 crc kubenswrapper[4125]: I0312 13:44:59.968903 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-kubelet-dir\") pod \"installer-10-crc\" (UID: 
\"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.009142 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3395180d-e7e5-42db-8bc0-c275a4389dc2-kube-api-access\") pod \"installer-10-crc\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.164279 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.261367 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl"] Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.261510 4125 topology_manager.go:215] "Topology Admit Handler" podUID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.262380 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.267912 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.268134 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.274085 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh9cr\" (UniqueName: \"kubernetes.io/projected/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-kube-api-access-mh9cr\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.274307 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-config-volume\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.274351 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-secret-volume\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.280203 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl"] Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.375205 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-config-volume\") pod \"collect-profiles-29555385-rxkwl\" (UID: 
\"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.375505 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-secret-volume\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.375559 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mh9cr\" (UniqueName: \"kubernetes.io/projected/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-kube-api-access-mh9cr\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.380040 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-config-volume\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.388352 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-secret-volume\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.402844 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh9cr\" (UniqueName: \"kubernetes.io/projected/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-kube-api-access-mh9cr\") pod \"collect-profiles-29555385-rxkwl\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.572919 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-10-crc"] Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.587277 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:00 crc kubenswrapper[4125]: I0312 13:45:00.908493 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl"] Mar 12 13:45:00 crc kubenswrapper[4125]: W0312 13:45:00.924911 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a90eb6a_7fc8_4544_b6a7_731623a1fcf6.slice/crio-4741c3089d76c5ed2ff228e4d30b85c73cb9514a0354064302619d63a2a52fba WatchSource:0}: Error finding container 4741c3089d76c5ed2ff228e4d30b85c73cb9514a0354064302619d63a2a52fba: Status 404 returned error can't find the container with id 4741c3089d76c5ed2ff228e4d30b85c73cb9514a0354064302619d63a2a52fba Mar 12 13:45:01 crc kubenswrapper[4125]: I0312 13:45:01.354211 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-10-crc" event={"ID":"3395180d-e7e5-42db-8bc0-c275a4389dc2","Type":"ContainerStarted","Data":"3e2fc80778d1f511e3128d4ee7b3a5e9d3e3c3f6a9118429dab684acedbd931b"} Mar 12 13:45:01 crc kubenswrapper[4125]: I0312 13:45:01.357403 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" event={"ID":"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6","Type":"ContainerStarted","Data":"1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574"} Mar 12 13:45:01 crc kubenswrapper[4125]: I0312 13:45:01.357471 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" event={"ID":"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6","Type":"ContainerStarted","Data":"4741c3089d76c5ed2ff228e4d30b85c73cb9514a0354064302619d63a2a52fba"} Mar 12 13:45:01 crc kubenswrapper[4125]: I0312 13:45:01.396007 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" podStartSLOduration=1.395867457 podStartE2EDuration="1.395867457s" podCreationTimestamp="2026-03-12 13:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:45:01.394728393 +0000 UTC m=+1471.718114362" watchObservedRunningTime="2026-03-12 13:45:01.395867457 +0000 UTC m=+1471.719253417" Mar 12 13:45:02 crc kubenswrapper[4125]: I0312 13:45:02.366134 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-10-crc" event={"ID":"3395180d-e7e5-42db-8bc0-c275a4389dc2","Type":"ContainerStarted","Data":"9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f"} Mar 12 13:45:02 crc kubenswrapper[4125]: I0312 13:45:02.370393 4125 generic.go:334] "Generic (PLEG): container finished" podID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" containerID="1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574" exitCode=0 Mar 12 13:45:02 crc kubenswrapper[4125]: I0312 13:45:02.370443 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" event={"ID":"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6","Type":"ContainerDied","Data":"1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574"} Mar 12 13:45:02 crc kubenswrapper[4125]: I0312 13:45:02.400726 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-10-crc" 
podStartSLOduration=3.400660577 podStartE2EDuration="3.400660577s" podCreationTimestamp="2026-03-12 13:44:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:45:02.398452346 +0000 UTC m=+1472.721838405" watchObservedRunningTime="2026-03-12 13:45:02.400660577 +0000 UTC m=+1472.724046566" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.653089 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.829506 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-config-volume\") pod \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.829902 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-secret-volume\") pod \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.830005 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh9cr\" (UniqueName: \"kubernetes.io/projected/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-kube-api-access-mh9cr\") pod \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\" (UID: \"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6\") " Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.830502 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-config-volume" (OuterVolumeSpecName: "config-volume") pod "3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" (UID: "3a90eb6a-7fc8-4544-b6a7-731623a1fcf6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.836474 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-kube-api-access-mh9cr" (OuterVolumeSpecName: "kube-api-access-mh9cr") pod "3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" (UID: "3a90eb6a-7fc8-4544-b6a7-731623a1fcf6"). InnerVolumeSpecName "kube-api-access-mh9cr". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.836603 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" (UID: "3a90eb6a-7fc8-4544-b6a7-731623a1fcf6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.931385 4125 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-secret-volume\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.931457 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mh9cr\" (UniqueName: \"kubernetes.io/projected/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-kube-api-access-mh9cr\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:03 crc kubenswrapper[4125]: I0312 13:45:03.931473 4125 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6-config-volume\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:04 crc kubenswrapper[4125]: I0312 13:45:04.400446 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" event={"ID":"3a90eb6a-7fc8-4544-b6a7-731623a1fcf6","Type":"ContainerDied","Data":"4741c3089d76c5ed2ff228e4d30b85c73cb9514a0354064302619d63a2a52fba"} Mar 12 13:45:04 crc kubenswrapper[4125]: I0312 13:45:04.400541 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4741c3089d76c5ed2ff228e4d30b85c73cb9514a0354064302619d63a2a52fba" Mar 12 13:45:04 crc kubenswrapper[4125]: I0312 13:45:04.400621 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl" Mar 12 13:45:18 crc kubenswrapper[4125]: I0312 13:45:18.208679 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/installer-10-crc"] Mar 12 13:45:18 crc kubenswrapper[4125]: I0312 13:45:18.211295 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/installer-10-crc" podUID="3395180d-e7e5-42db-8bc0-c275a4389dc2" containerName="installer" containerID="cri-o://9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f" gracePeriod=30 Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.395781 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-11-crc"] Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.396459 4125 topology_manager.go:215] "Topology Admit Handler" podUID="b541f220-6272-4285-8250-4474714fb6cd" podNamespace="openshift-kube-apiserver" podName="installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: E0312 13:45:21.396694 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" containerName="collect-profiles" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.396723 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" containerName="collect-profiles" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.397006 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" containerName="collect-profiles" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.398101 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.437601 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-11-crc"] Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.560278 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-kubelet-dir\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.560661 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-var-lock\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.561029 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b541f220-6272-4285-8250-4474714fb6cd-kube-api-access\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.662486 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b541f220-6272-4285-8250-4474714fb6cd-kube-api-access\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.662603 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-kubelet-dir\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.662654 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-var-lock\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.662764 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-var-lock\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.663172 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-kubelet-dir\") pod \"installer-11-crc\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.704050 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b541f220-6272-4285-8250-4474714fb6cd-kube-api-access\") pod \"installer-11-crc\" (UID: 
\"b541f220-6272-4285-8250-4474714fb6cd\") " pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:21 crc kubenswrapper[4125]: I0312 13:45:21.741563 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-11-crc" Mar 12 13:45:22 crc kubenswrapper[4125]: I0312 13:45:22.080951 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-11-crc"] Mar 12 13:45:22 crc kubenswrapper[4125]: I0312 13:45:22.546768 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-11-crc" event={"ID":"b541f220-6272-4285-8250-4474714fb6cd","Type":"ContainerStarted","Data":"5d0b48814ec9b2da9b42c5c8a68d89a61305239af996e1b4cbdd6a05e8d8ce0a"} Mar 12 13:45:23 crc kubenswrapper[4125]: I0312 13:45:23.555376 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-11-crc" event={"ID":"b541f220-6272-4285-8250-4474714fb6cd","Type":"ContainerStarted","Data":"f8f86fd8c18eedf0d9d920870ad5bef4c263479877e390f52ec724c192d2d74d"} Mar 12 13:45:23 crc kubenswrapper[4125]: I0312 13:45:23.617014 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-11-crc" podStartSLOduration=2.61692024 podStartE2EDuration="2.61692024s" podCreationTimestamp="2026-03-12 13:45:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:45:23.616506077 +0000 UTC m=+1493.939892046" watchObservedRunningTime="2026-03-12 13:45:23.61692024 +0000 UTC m=+1493.940306039" Mar 12 13:45:35 crc kubenswrapper[4125]: I0312 13:45:35.639544 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:45:35 crc kubenswrapper[4125]: I0312 13:45:35.641179 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:45:35 crc kubenswrapper[4125]: I0312 13:45:35.641743 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:45:35 crc kubenswrapper[4125]: I0312 13:45:35.642117 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:45:35 crc kubenswrapper[4125]: I0312 13:45:35.642453 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:45:36 crc kubenswrapper[4125]: E0312 13:45:36.034086 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3395180d_e7e5_42db_8bc0_c275a4389dc2.slice/crio-9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.416978 4125 patch_prober.go:28] interesting pod/dns-default-gbw49 container/dns namespace/openshift-dns: Readiness probe status=failure output="Get \"http://10.217.0.31:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.423171 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" containerName="dns" probeResult="failure" output="Get 
\"http://10.217.0.31:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 12 13:45:43 crc kubenswrapper[4125]: E0312 13:45:43.480207 4125 kubelet.go:2517] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="5.319s" Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.522288 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_installer-10-crc_3395180d-e7e5-42db-8bc0-c275a4389dc2/installer/0.log" Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.522394 4125 generic.go:334] "Generic (PLEG): container finished" podID="3395180d-e7e5-42db-8bc0-c275a4389dc2" containerID="9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f" exitCode=1 Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.522433 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-10-crc" event={"ID":"3395180d-e7e5-42db-8bc0-c275a4389dc2","Type":"ContainerDied","Data":"9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f"} Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.843006 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg"] Mar 12 13:45:43 crc kubenswrapper[4125]: I0312 13:45:43.843345 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" containerName="route-controller-manager" containerID="cri-o://50bc70f11c788ac2ff82783495b350392a81cc7e98b9f7c462821291b6fdc22d" gracePeriod=30 Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.534385 4125 generic.go:334] "Generic (PLEG): container finished" podID="017a4afc-9c30-4dc3-974a-05c3d1384017" containerID="50bc70f11c788ac2ff82783495b350392a81cc7e98b9f7c462821291b6fdc22d" exitCode=0 Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.536057 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" event={"ID":"017a4afc-9c30-4dc3-974a-05c3d1384017","Type":"ContainerDied","Data":"50bc70f11c788ac2ff82783495b350392a81cc7e98b9f7c462821291b6fdc22d"} Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.536309 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" event={"ID":"017a4afc-9c30-4dc3-974a-05c3d1384017","Type":"ContainerDied","Data":"f34a5dca1703e26ee4d890eeb701920bdeb2bee37d27ff144b2a2b900db72619"} Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.536748 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f34a5dca1703e26ee4d890eeb701920bdeb2bee37d27ff144b2a2b900db72619" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.553411 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.654023 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_installer-10-crc_3395180d-e7e5-42db-8bc0-c275a4389dc2/installer/0.log" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.654465 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.736883 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzln9\" (UniqueName: \"kubernetes.io/projected/017a4afc-9c30-4dc3-974a-05c3d1384017-kube-api-access-rzln9\") pod \"017a4afc-9c30-4dc3-974a-05c3d1384017\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.736957 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-client-ca\") pod \"017a4afc-9c30-4dc3-974a-05c3d1384017\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.736999 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3395180d-e7e5-42db-8bc0-c275a4389dc2-kube-api-access\") pod \"3395180d-e7e5-42db-8bc0-c275a4389dc2\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.737049 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/017a4afc-9c30-4dc3-974a-05c3d1384017-serving-cert\") pod \"017a4afc-9c30-4dc3-974a-05c3d1384017\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.737097 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-config\") pod \"017a4afc-9c30-4dc3-974a-05c3d1384017\" (UID: \"017a4afc-9c30-4dc3-974a-05c3d1384017\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.737134 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-var-lock\") pod \"3395180d-e7e5-42db-8bc0-c275a4389dc2\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.737152 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-kubelet-dir\") pod \"3395180d-e7e5-42db-8bc0-c275a4389dc2\" (UID: \"3395180d-e7e5-42db-8bc0-c275a4389dc2\") " Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.737454 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3395180d-e7e5-42db-8bc0-c275a4389dc2" (UID: "3395180d-e7e5-42db-8bc0-c275a4389dc2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.738064 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-client-ca" (OuterVolumeSpecName: "client-ca") pod "017a4afc-9c30-4dc3-974a-05c3d1384017" (UID: "017a4afc-9c30-4dc3-974a-05c3d1384017"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.738158 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-var-lock" (OuterVolumeSpecName: "var-lock") pod "3395180d-e7e5-42db-8bc0-c275a4389dc2" (UID: "3395180d-e7e5-42db-8bc0-c275a4389dc2"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.738747 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-config" (OuterVolumeSpecName: "config") pod "017a4afc-9c30-4dc3-974a-05c3d1384017" (UID: "017a4afc-9c30-4dc3-974a-05c3d1384017"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.743909 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3395180d-e7e5-42db-8bc0-c275a4389dc2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3395180d-e7e5-42db-8bc0-c275a4389dc2" (UID: "3395180d-e7e5-42db-8bc0-c275a4389dc2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.744335 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/017a4afc-9c30-4dc3-974a-05c3d1384017-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "017a4afc-9c30-4dc3-974a-05c3d1384017" (UID: "017a4afc-9c30-4dc3-974a-05c3d1384017"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.745633 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/017a4afc-9c30-4dc3-974a-05c3d1384017-kube-api-access-rzln9" (OuterVolumeSpecName: "kube-api-access-rzln9") pod "017a4afc-9c30-4dc3-974a-05c3d1384017" (UID: "017a4afc-9c30-4dc3-974a-05c3d1384017"). InnerVolumeSpecName "kube-api-access-rzln9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839075 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-var-lock\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839126 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3395180d-e7e5-42db-8bc0-c275a4389dc2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839144 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rzln9\" (UniqueName: \"kubernetes.io/projected/017a4afc-9c30-4dc3-974a-05c3d1384017-kube-api-access-rzln9\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839155 4125 reconciler_common.go:300] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-client-ca\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839167 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3395180d-e7e5-42db-8bc0-c275a4389dc2-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839177 4125 reconciler_common.go:300] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/017a4afc-9c30-4dc3-974a-05c3d1384017-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:44 crc kubenswrapper[4125]: I0312 13:45:44.839197 4125 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/017a4afc-9c30-4dc3-974a-05c3d1384017-config\") on node \"crc\" DevicePath \"\"" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.190588 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"] Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.190771 4125 topology_manager.go:215] "Topology Admit Handler" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: E0312 13:45:45.191161 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" containerName="route-controller-manager" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.191198 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" containerName="route-controller-manager" Mar 12 13:45:45 crc kubenswrapper[4125]: E0312 13:45:45.191229 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3395180d-e7e5-42db-8bc0-c275a4389dc2" containerName="installer" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.191243 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="3395180d-e7e5-42db-8bc0-c275a4389dc2" containerName="installer" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.191521 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="3395180d-e7e5-42db-8bc0-c275a4389dc2" containerName="installer" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.191544 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" 
containerName="route-controller-manager" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.192503 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.278199 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"] Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.346537 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.346754 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.346889 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.346952 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.448006 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.448131 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.448169 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " 
pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.448228 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.449801 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.450588 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.459073 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.494556 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.522010 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.545588 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_installer-10-crc_3395180d-e7e5-42db-8bc0-c275a4389dc2/installer/0.log" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.545796 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.552027 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-10-crc" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.552078 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-10-crc" event={"ID":"3395180d-e7e5-42db-8bc0-c275a4389dc2","Type":"ContainerDied","Data":"3e2fc80778d1f511e3128d4ee7b3a5e9d3e3c3f6a9118429dab684acedbd931b"} Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.552157 4125 scope.go:117] "RemoveContainer" containerID="9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f" Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.658664 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/installer-10-crc"] Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.662928 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/installer-10-crc"] Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.689475 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg"] Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.704466 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fccd57d48-6m9cg"] Mar 12 13:45:45 crc kubenswrapper[4125]: I0312 13:45:45.854627 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"] Mar 12 13:45:46 crc kubenswrapper[4125]: I0312 13:45:46.035619 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="017a4afc-9c30-4dc3-974a-05c3d1384017" path="/var/lib/kubelet/pods/017a4afc-9c30-4dc3-974a-05c3d1384017/volumes" Mar 12 13:45:46 crc kubenswrapper[4125]: I0312 13:45:46.036873 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3395180d-e7e5-42db-8bc0-c275a4389dc2" path="/var/lib/kubelet/pods/3395180d-e7e5-42db-8bc0-c275a4389dc2/volumes" Mar 12 13:45:46 crc kubenswrapper[4125]: E0312 13:45:46.215455 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3395180d_e7e5_42db_8bc0_c275a4389dc2.slice/crio-9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:45:46 crc kubenswrapper[4125]: I0312 13:45:46.555170 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" event={"ID":"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2","Type":"ContainerStarted","Data":"b8a97465b8825c21ef6e2eff52e83bffb18d84dbe7b67b65de6dc262d956bd94"} Mar 12 13:45:46 crc kubenswrapper[4125]: I0312 13:45:46.555519 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" event={"ID":"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2","Type":"ContainerStarted","Data":"87be2cd6678152a3b142951cfd7ca5b35b236de4d09af29f3d91759dec603f06"} Mar 12 13:45:46 crc kubenswrapper[4125]: I0312 13:45:46.556142 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:46 crc kubenswrapper[4125]: I0312 13:45:46.611163 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" 
podStartSLOduration=3.611080956 podStartE2EDuration="3.611080956s" podCreationTimestamp="2026-03-12 13:45:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:45:46.608760284 +0000 UTC m=+1516.932146273" watchObservedRunningTime="2026-03-12 13:45:46.611080956 +0000 UTC m=+1516.934467015" Mar 12 13:45:47 crc kubenswrapper[4125]: I0312 13:45:47.018546 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 12 13:45:55 crc kubenswrapper[4125]: I0312 13:45:55.116029 4125 scope.go:117] "RemoveContainer" containerID="740bf76161fec26f5a24bce398a3932f4dddc3105a1392e7362ce9200176a764" Mar 12 13:45:56 crc kubenswrapper[4125]: E0312 13:45:56.377443 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3395180d_e7e5_42db_8bc0_c275a4389dc2.slice/crio-9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:46:06 crc kubenswrapper[4125]: E0312 13:46:06.539149 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3395180d_e7e5_42db_8bc0_c275a4389dc2.slice/crio-9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:46:16 crc kubenswrapper[4125]: E0312 13:46:16.708791 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3395180d_e7e5_42db_8bc0_c275a4389dc2.slice/crio-9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.149334 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/revision-pruner-7-crc"] Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.149496 4125 topology_manager.go:215] "Topology Admit Handler" podUID="67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" podNamespace="openshift-kube-scheduler" podName="revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.154131 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.163675 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler"/"installer-sa-dockercfg-9ln8g" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.164599 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler"/"kube-root-ca.crt" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.199367 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-7-crc"] Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.262557 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kubelet-dir\") pod \"revision-pruner-7-crc\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.262666 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kube-api-access\") pod \"revision-pruner-7-crc\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.364874 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kubelet-dir\") pod \"revision-pruner-7-crc\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.364991 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kube-api-access\") pod \"revision-pruner-7-crc\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.365072 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kubelet-dir\") pod \"revision-pruner-7-crc\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.395336 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kube-api-access\") pod \"revision-pruner-7-crc\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.490206 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.798618 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-7-crc"] Mar 12 13:46:21 crc kubenswrapper[4125]: I0312 13:46:21.950705 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-7-crc" event={"ID":"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f","Type":"ContainerStarted","Data":"c997f0cb21c68f15b2191605a56be713c789d8bcf03f47812b932d8ee7c3cb4b"} Mar 12 13:46:22 crc kubenswrapper[4125]: I0312 13:46:22.965063 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-7-crc" event={"ID":"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f","Type":"ContainerStarted","Data":"59861dfc10aec218817855a092000d44ac7976dd82b4dafd045f106746cdf178"} Mar 12 13:46:23 crc kubenswrapper[4125]: I0312 13:46:23.199040 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-scheduler/revision-pruner-7-crc" podStartSLOduration=2.198921507 podStartE2EDuration="2.198921507s" podCreationTimestamp="2026-03-12 13:46:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:46:23.186451931 +0000 UTC m=+1553.509838530" watchObservedRunningTime="2026-03-12 13:46:23.198921507 +0000 UTC m=+1553.522307706" Mar 12 13:46:24 crc kubenswrapper[4125]: I0312 13:46:24.988959 4125 generic.go:334] "Generic (PLEG): container finished" podID="67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" containerID="59861dfc10aec218817855a092000d44ac7976dd82b4dafd045f106746cdf178" exitCode=0 Mar 12 13:46:24 crc kubenswrapper[4125]: I0312 13:46:24.990662 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-7-crc" event={"ID":"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f","Type":"ContainerDied","Data":"59861dfc10aec218817855a092000d44ac7976dd82b4dafd045f106746cdf178"} Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.405191 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.571677 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kube-api-access\") pod \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.572040 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kubelet-dir\") pod \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\" (UID: \"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\") " Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.572415 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" (UID: "67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.573140 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.579530 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" (UID: "67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:46:26 crc kubenswrapper[4125]: I0312 13:46:26.675606 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f-kube-api-access\") on node \"crc\" DevicePath \"\"" Mar 12 13:46:26 crc kubenswrapper[4125]: E0312 13:46:26.925482 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3395180d_e7e5_42db_8bc0_c275a4389dc2.slice/crio-9e89d1a557c4e675fa889ac45da8cf52bb4333ea85c8b5efd683b26a7128689f.scope\": RecentStats: unable to find data in memory cache]" Mar 12 13:46:27 crc kubenswrapper[4125]: I0312 13:46:27.011465 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-7-crc" event={"ID":"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f","Type":"ContainerDied","Data":"c997f0cb21c68f15b2191605a56be713c789d8bcf03f47812b932d8ee7c3cb4b"} Mar 12 13:46:27 crc kubenswrapper[4125]: I0312 13:46:27.011609 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c997f0cb21c68f15b2191605a56be713c789d8bcf03f47812b932d8ee7c3cb4b" Mar 12 13:46:27 crc kubenswrapper[4125]: I0312 13:46:27.011678 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-7-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.331198 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/revision-pruner-8-crc"] Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.331660 4125 topology_manager.go:215] "Topology Admit Handler" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" podNamespace="openshift-kube-scheduler" podName="revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: E0312 13:46:28.332008 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" containerName="pruner" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.332044 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" containerName="pruner" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.332226 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" containerName="pruner" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.333399 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.339165 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler"/"kube-root-ca.crt" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.340133 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0f74f48-a515-4cf0-9196-37bfb966b31f-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") " pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.340915 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b0f74f48-a515-4cf0-9196-37bfb966b31f-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") " pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.343746 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler"/"installer-sa-dockercfg-9ln8g" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.361535 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-8-crc"] Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.442111 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b0f74f48-a515-4cf0-9196-37bfb966b31f-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") " pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.442608 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0f74f48-a515-4cf0-9196-37bfb966b31f-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") " pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.442379 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b0f74f48-a515-4cf0-9196-37bfb966b31f-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") " pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.482578 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0f74f48-a515-4cf0-9196-37bfb966b31f-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") " pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:28 crc kubenswrapper[4125]: I0312 13:46:28.664164 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-8-crc" Mar 12 13:46:29 crc kubenswrapper[4125]: I0312 13:46:29.062225 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-8-crc"] Mar 12 13:46:30 crc kubenswrapper[4125]: I0312 13:46:30.040569 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-8-crc" event={"ID":"b0f74f48-a515-4cf0-9196-37bfb966b31f","Type":"ContainerStarted","Data":"778c1af316d65eea79f7f1be6027a10608fafea8d8798a60a58e005953ec56f7"} Mar 12 13:46:30 crc kubenswrapper[4125]: I0312 13:46:30.934792 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/installer-8-crc"] Mar 12 13:46:30 crc kubenswrapper[4125]: I0312 13:46:30.934999 4125 topology_manager.go:215] "Topology Admit Handler" podUID="252d78ec-e97f-4fdb-9104-4464f1cb6172" podNamespace="openshift-kube-scheduler" podName="installer-8-crc" Mar 12 13:46:30 crc kubenswrapper[4125]: I0312 13:46:30.936071 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-8-crc" Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.002285 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/installer-8-crc"] Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.041767 4125 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file" Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.044550 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.044798 4125 topology_manager.go:215] "Topology Admit Handler" podUID="14346ec471451487d5c07e34f8f3457a" podNamespace="openshift-kube-apiserver" podName="kube-apiserver-startup-monitor-crc" Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.046321 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.046772 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.047499 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-check-endpoints" containerID="cri-o://dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4" gracePeriod=15
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.046928 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver" containerID="cri-o://79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2" gracePeriod=15
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.047954 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.048052 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-syncer" containerID="cri-o://d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7" gracePeriod=15
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.048274 4125 topology_manager.go:215] "Topology Admit Handler" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" podNamespace="openshift-kube-apiserver" podName="kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.048439 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-insecure-readyz"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.047976 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f" gracePeriod=15
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.048014 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c" gracePeriod=15
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.049991 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-insecure-readyz"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.050020 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050028 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.050043 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-regeneration-controller"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050051 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-regeneration-controller"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.050062 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="setup"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050069 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="setup"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.050081 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-check-endpoints"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050088 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-check-endpoints"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.050097 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-syncer"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050104 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-syncer"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050226 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-insecure-readyz"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050239 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-regeneration-controller"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050287 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050304 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-cert-syncer"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.050311 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="48128e8d38b5cbcd2691da698bd9cac3" containerName="kube-apiserver-check-endpoints"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.059138 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-8-crc" event={"ID":"b0f74f48-a515-4cf0-9196-37bfb966b31f","Type":"ContainerStarted","Data":"90a68900e31251bdd7ff3fa99d753ba0add37c796dcaec2a9e573e9b8def289c"}
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.068497 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="48128e8d38b5cbcd2691da698bd9cac3" podUID="a3f6a3e226d5c60ea73cb7fac85e9195"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.119985 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-kubelet-dir\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.125773 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-var-lock\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.126203 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.158730 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.166698 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-scheduler/revision-pruner-8-crc" podStartSLOduration=3.166639852 podStartE2EDuration="3.166639852s" podCreationTimestamp="2026-03-12 13:46:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:46:31.135728335 +0000 UTC m=+1561.459114284" watchObservedRunningTime="2026-03-12 13:46:31.166639852 +0000 UTC m=+1561.490025751"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229112 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-kubelet-dir\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229213 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229272 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-var-lock\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229343 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229344 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-kubelet-dir\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229512 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229548 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229544 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-var-lock\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229853 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.229966 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.230023 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.230062 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.230089 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.242995 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler/installer-8-crc: failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.246238 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access podName:252d78ec-e97f-4fdb-9104-4464f1cb6172 nodeName:}" failed. No retries permitted until 2026-03-12 13:46:31.744585814 +0000 UTC m=+1562.067971753 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access") pod "installer-8-crc" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172") : failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.248895 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/events\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{installer-8-crc.189c1c0c060b3615 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:installer-8-crc,UID:252d78ec-e97f-4fdb-9104-4464f1cb6172,APIVersion:v1,ResourceVersion:32881,FieldPath:,},Reason:FailedMount,Message:MountVolume.SetUp failed for volume \"kube-api-access\" : failed to fetch token: Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token\": dial tcp 192.168.130.11:6443: connect: connection refused,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:46:31.243716117 +0000 UTC m=+1561.567102146,LastTimestamp:2026-03-12 13:46:31.243716117 +0000 UTC m=+1561.567102146,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.331552 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.331628 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.331706 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.331804 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.331913 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.331939 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332150 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332202 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332762 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332803 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332923 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332976 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.332999 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.333010 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.333072 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.333106 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"14346ec471451487d5c07e34f8f3457a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.411527 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: I0312 13:46:31.840706 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.842720 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler/installer-8-crc: failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:46:31 crc kubenswrapper[4125]: E0312 13:46:31.842797 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access podName:252d78ec-e97f-4fdb-9104-4464f1cb6172 nodeName:}" failed. No retries permitted until 2026-03-12 13:46:32.842777729 +0000 UTC m=+1563.166163518 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access") pod "installer-8-crc" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172") : failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.032655 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.066659 4125 generic.go:334] "Generic (PLEG): container finished" podID="b0f74f48-a515-4cf0-9196-37bfb966b31f" containerID="90a68900e31251bdd7ff3fa99d753ba0add37c796dcaec2a9e573e9b8def289c" exitCode=0
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.066724 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-8-crc" event={"ID":"b0f74f48-a515-4cf0-9196-37bfb966b31f","Type":"ContainerDied","Data":"90a68900e31251bdd7ff3fa99d753ba0add37c796dcaec2a9e573e9b8def289c"}
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.070499 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.071364 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_48128e8d38b5cbcd2691da698bd9cac3/kube-apiserver-cert-syncer/0.log"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.072795 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.073914 4125 generic.go:334] "Generic (PLEG): container finished" podID="48128e8d38b5cbcd2691da698bd9cac3" containerID="dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4" exitCode=0
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.074058 4125 generic.go:334] "Generic (PLEG): container finished" podID="48128e8d38b5cbcd2691da698bd9cac3" containerID="b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f" exitCode=0
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.074124 4125 generic.go:334] "Generic (PLEG): container finished" podID="48128e8d38b5cbcd2691da698bd9cac3" containerID="ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c" exitCode=0
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.074213 4125 generic.go:334] "Generic (PLEG): container finished" podID="48128e8d38b5cbcd2691da698bd9cac3" containerID="d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7" exitCode=2
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.078665 4125 generic.go:334] "Generic (PLEG): container finished" podID="b541f220-6272-4285-8250-4474714fb6cd" containerID="f8f86fd8c18eedf0d9d920870ad5bef4c263479877e390f52ec724c192d2d74d" exitCode=0
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.078724 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-11-crc" event={"ID":"b541f220-6272-4285-8250-4474714fb6cd","Type":"ContainerDied","Data":"f8f86fd8c18eedf0d9d920870ad5bef4c263479877e390f52ec724c192d2d74d"}
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.080203 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.081553 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.083183 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.084367 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"14346ec471451487d5c07e34f8f3457a","Type":"ContainerStarted","Data":"a97fe34c547aa390f9726ce3f0a2b5db767da23688258863a580017da9eab09f"}
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.084413 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"14346ec471451487d5c07e34f8f3457a","Type":"ContainerStarted","Data":"d91e52498df26c14cfdb7bf8494c9ec0ce5ed6db937842bbcd27b82e4049c111"}
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.086433 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.087637 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.088697 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:32 crc kubenswrapper[4125]: I0312 13:46:32.865175 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:46:32 crc kubenswrapper[4125]: E0312 13:46:32.868035 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler/installer-8-crc: failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:46:32 crc kubenswrapper[4125]: E0312 13:46:32.868209 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access podName:252d78ec-e97f-4fdb-9104-4464f1cb6172 nodeName:}" failed. No retries permitted until 2026-03-12 13:46:34.868171382 +0000 UTC m=+1565.191557811 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access") pod "installer-8-crc" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172") : failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused
Mar 12 13:46:33 crc kubenswrapper[4125]: I0312 13:46:33.969116 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-11-crc"
Mar 12 13:46:33 crc kubenswrapper[4125]: I0312 13:46:33.971956 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:33 crc kubenswrapper[4125]: I0312 13:46:33.973232 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:33 crc kubenswrapper[4125]: I0312 13:46:33.977783 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.019275 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-kubelet-dir\") pod \"b541f220-6272-4285-8250-4474714fb6cd\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.019371 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b541f220-6272-4285-8250-4474714fb6cd-kube-api-access\") pod \"b541f220-6272-4285-8250-4474714fb6cd\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.019448 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-var-lock\") pod \"b541f220-6272-4285-8250-4474714fb6cd\" (UID: \"b541f220-6272-4285-8250-4474714fb6cd\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.019463 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b541f220-6272-4285-8250-4474714fb6cd" (UID: "b541f220-6272-4285-8250-4474714fb6cd"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.019642 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-var-lock" (OuterVolumeSpecName: "var-lock") pod "b541f220-6272-4285-8250-4474714fb6cd" (UID: "b541f220-6272-4285-8250-4474714fb6cd"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.019999 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-var-lock\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.020022 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b541f220-6272-4285-8250-4474714fb6cd-kubelet-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.032154 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b541f220-6272-4285-8250-4474714fb6cd-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b541f220-6272-4285-8250-4474714fb6cd" (UID: "b541f220-6272-4285-8250-4474714fb6cd"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.078086 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-8-crc"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.079913 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.082526 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.083516 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.095368 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_48128e8d38b5cbcd2691da698bd9cac3/kube-apiserver-cert-syncer/0.log"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.097328 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.100682 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.101758 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.103341 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.103599 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_48128e8d38b5cbcd2691da698bd9cac3/kube-apiserver-cert-syncer/0.log"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.104473 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.105073 4125 generic.go:334] "Generic (PLEG): container finished" podID="48128e8d38b5cbcd2691da698bd9cac3" containerID="79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2" exitCode=0
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.105173 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.105265 4125 scope.go:117] "RemoveContainer" containerID="dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.107680 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-11-crc"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.107716 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-11-crc" event={"ID":"b541f220-6272-4285-8250-4474714fb6cd","Type":"ContainerDied","Data":"5d0b48814ec9b2da9b42c5c8a68d89a61305239af996e1b4cbdd6a05e8d8ce0a"}
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.107765 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d0b48814ec9b2da9b42c5c8a68d89a61305239af996e1b4cbdd6a05e8d8ce0a"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.109861 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.111331 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.112197 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-8-crc" event={"ID":"b0f74f48-a515-4cf0-9196-37bfb966b31f","Type":"ContainerDied","Data":"778c1af316d65eea79f7f1be6027a10608fafea8d8798a60a58e005953ec56f7"}
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.112214 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-8-crc"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.112285 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="778c1af316d65eea79f7f1be6027a10608fafea8d8798a60a58e005953ec56f7"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.112710 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.114988 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.116050 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.117273 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.117904 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.118447 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.119064 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.119708 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.120471 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b0f74f48-a515-4cf0-9196-37bfb966b31f-kubelet-dir\") pod \"b0f74f48-a515-4cf0-9196-37bfb966b31f\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.120559 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0f74f48-a515-4cf0-9196-37bfb966b31f-kube-api-access\") pod \"b0f74f48-a515-4cf0-9196-37bfb966b31f\" (UID: \"b0f74f48-a515-4cf0-9196-37bfb966b31f\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.120924 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b541f220-6272-4285-8250-4474714fb6cd-kube-api-access\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.120920 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b0f74f48-a515-4cf0-9196-37bfb966b31f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b0f74f48-a515-4cf0-9196-37bfb966b31f" (UID: "b0f74f48-a515-4cf0-9196-37bfb966b31f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.122413 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.123511 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.127345 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f74f48-a515-4cf0-9196-37bfb966b31f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b0f74f48-a515-4cf0-9196-37bfb966b31f" (UID: "b0f74f48-a515-4cf0-9196-37bfb966b31f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.145267 4125 scope.go:117] "RemoveContainer" containerID="b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.182885 4125 scope.go:117] "RemoveContainer" containerID="ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.222289 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-resource-dir\") pod \"48128e8d38b5cbcd2691da698bd9cac3\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.222431 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "48128e8d38b5cbcd2691da698bd9cac3" (UID: "48128e8d38b5cbcd2691da698bd9cac3"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.222465 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-audit-dir\") pod \"48128e8d38b5cbcd2691da698bd9cac3\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.222522 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-cert-dir\") pod \"48128e8d38b5cbcd2691da698bd9cac3\" (UID: \"48128e8d38b5cbcd2691da698bd9cac3\") "
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.222586 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "48128e8d38b5cbcd2691da698bd9cac3" (UID: "48128e8d38b5cbcd2691da698bd9cac3"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.222750 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "48128e8d38b5cbcd2691da698bd9cac3" (UID: "48128e8d38b5cbcd2691da698bd9cac3"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.223033 4125 reconciler_common.go:300] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-cert-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.223054 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b0f74f48-a515-4cf0-9196-37bfb966b31f-kubelet-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.223066 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-resource-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.223078 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0f74f48-a515-4cf0-9196-37bfb966b31f-kube-api-access\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.223088 4125 reconciler_common.go:300] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48128e8d38b5cbcd2691da698bd9cac3-audit-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.227022 4125 scope.go:117] "RemoveContainer" containerID="d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.302044 4125 scope.go:117] "RemoveContainer" containerID="79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.357405 4125 scope.go:117] "RemoveContainer" containerID="00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.440613 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.441660 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.444910 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.445024 4125 scope.go:117] "RemoveContainer" containerID="dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.446156 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.446376 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4\": container with ID starting with dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4 not found: ID does not exist" containerID="dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.446489 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4"} err="failed to get container status \"dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4\": rpc error: code = NotFound desc = could not find container \"dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4\": container with ID starting with dfb244491641cbbc51e371fc9607b84660b7adeecc7938e3412b9e23b1a481f4 not found: ID does not exist"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.446568 4125 scope.go:117] "RemoveContainer" containerID="b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f"
Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.447104 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f\": container with ID starting with b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f not found: ID does not exist" containerID="b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.447149 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f"} err="failed to get container status \"b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f\": rpc error: code = NotFound desc = could not find container \"b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f\": container with ID starting with b65c3ecc073b140c63dd3fb55ee85bf7ed47c5976160d949fac635e9813a939f not found: ID does not exist"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.447235 4125 scope.go:117] "RemoveContainer" containerID="ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c"
Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.448342 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c\": container with ID starting with ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c not found: ID does not exist" containerID="ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.448376 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c"} err="failed to get container status \"ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c\": rpc error: code = NotFound desc = could not find container \"ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c\": container with ID starting with ff9b9d33e3f3bed6fbe2821c28e4e1abd1c9436e7e4b59faa79cdf4534121b0c not found: ID does not exist"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.448386 4125 scope.go:117] "RemoveContainer" containerID="d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.449172 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.449491 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7\": container with ID starting with d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7 not found: ID does not exist" containerID="d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.449521 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7"} err="failed to get container status \"d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7\": rpc error: code = NotFound desc = could not find container \"d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7\": container with ID starting with d62c14db5c571475abb226adc2ac6c6ab66ad5f400bdb1bf8646121bd9b2f9a7 not found: ID does not exist"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.449533 4125 scope.go:117] "RemoveContainer" containerID="79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2"
Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.450370 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2\": container with ID starting with 79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2 not found: ID does not exist" containerID="79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.450397 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2"} err="failed to get container status \"79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2\": rpc error: code = NotFound desc = could not find container \"79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2\": container with ID starting with 79e6431ebefcb6901cf601d7357f67728f46b1d8103eb828018592bb42b303b2 not found: ID does not exist"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.450408 4125 scope.go:117] "RemoveContainer" containerID="00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2"
Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.450806 4125 status_manager.go:853] "Failed to get status for pod" podUID="48128e8d38b5cbcd2691da698bd9cac3" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused"
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.451524 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2\": container with ID starting with 00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2 not found: ID does not exist" containerID="00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2" Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.451557 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2"} err="failed to get container status \"00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2\": rpc error: code = NotFound desc = could not find container \"00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2\": container with ID starting with 00fc005ebf13acc986987b3a02954fc3bdedbeb00bb13e70f55fb0e978e3a8d2 not found: ID does not exist" Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.451909 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.455021 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.759744 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.761393 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.764609 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.765770 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.769167 4125 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:34 crc 
kubenswrapper[4125]: I0312 13:46:34.769230 4125 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.772013 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="200ms" Mar 12 13:46:34 crc kubenswrapper[4125]: I0312 13:46:34.933446 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc" Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.934650 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler/installer-8-crc: failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.936076 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access podName:252d78ec-e97f-4fdb-9104-4464f1cb6172 nodeName:}" failed. No retries permitted until 2026-03-12 13:46:38.936052092 +0000 UTC m=+1569.259438021 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access") pod "installer-8-crc" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172") : failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 13:46:34 crc kubenswrapper[4125]: E0312 13:46:34.976497 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="400ms" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.214323 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?resourceVersion=0&timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.215315 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.216181 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.217324 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.218438 4125 kubelet_node_status.go:594] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.218454 4125 kubelet_node_status.go:581] "Unable to update node status" err="update node status exceeds retry count" Mar 12 13:46:35 crc kubenswrapper[4125]: E0312 13:46:35.378723 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="800ms" Mar 12 13:46:35 crc kubenswrapper[4125]: I0312 13:46:35.642963 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" status="Running" Mar 12 13:46:35 crc kubenswrapper[4125]: I0312 13:46:35.643079 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:46:35 crc kubenswrapper[4125]: I0312 13:46:35.643107 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:46:35 crc kubenswrapper[4125]: I0312 13:46:35.643147 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:46:35 crc kubenswrapper[4125]: I0312 13:46:35.643180 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:46:36 crc kubenswrapper[4125]: E0312 13:46:36.755453 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="1.6s" Mar 12 13:46:36 crc kubenswrapper[4125]: I0312 13:46:36.791020 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48128e8d38b5cbcd2691da698bd9cac3" path="/var/lib/kubelet/pods/48128e8d38b5cbcd2691da698bd9cac3/volumes" Mar 12 13:46:38 crc kubenswrapper[4125]: E0312 13:46:38.357684 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="3.2s" Mar 12 13:46:38 crc kubenswrapper[4125]: I0312 13:46:38.966940 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc" Mar 12 13:46:38 crc kubenswrapper[4125]: E0312 13:46:38.970545 4125 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler/installer-8-crc: failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 
13:46:38 crc kubenswrapper[4125]: E0312 13:46:38.970979 4125 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access podName:252d78ec-e97f-4fdb-9104-4464f1cb6172 nodeName:}" failed. No retries permitted until 2026-03-12 13:46:46.970946079 +0000 UTC m=+1577.294332348 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access") pod "installer-8-crc" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172") : failed to fetch token: Post "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token": dial tcp 192.168.130.11:6443: connect: connection refused Mar 12 13:46:40 crc kubenswrapper[4125]: E0312 13:46:40.360498 4125 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/events\": dial tcp 192.168.130.11:6443: connect: connection refused" event="&Event{ObjectMeta:{installer-8-crc.189c1c0c060b3615 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:installer-8-crc,UID:252d78ec-e97f-4fdb-9104-4464f1cb6172,APIVersion:v1,ResourceVersion:32881,FieldPath:,},Reason:FailedMount,Message:MountVolume.SetUp failed for volume \"kube-api-access\" : failed to fetch token: Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa/token\": dial tcp 192.168.130.11:6443: connect: connection refused,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-12 13:46:31.243716117 +0000 UTC m=+1561.567102146,LastTimestamp:2026-03-12 13:46:31.243716117 +0000 UTC m=+1561.567102146,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 12 13:46:41 crc kubenswrapper[4125]: E0312 13:46:41.560477 4125 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.130.11:6443: connect: connection refused" interval="6.4s" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.025639 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.027713 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.028656 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.029396 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.030207 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.031643 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.032906 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.077674 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.078107 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:42 crc kubenswrapper[4125]: E0312 13:46:42.081127 4125 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.082127 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.830750 4125 generic.go:334] "Generic (PLEG): container finished" podID="a3f6a3e226d5c60ea73cb7fac85e9195" containerID="02292f1a05fbf04500219ff248172820061bf5862d55f9fec29d78ed41bbe6ce" exitCode=0 Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.831219 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerDied","Data":"02292f1a05fbf04500219ff248172820061bf5862d55f9fec29d78ed41bbe6ce"} Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.831424 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"44c08d98eaf9de8a1bf19c3dae9576d7c97d0bf9d0b89e825e3392e88823f86d"} Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.832452 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.832520 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.834007 4125 status_manager.go:853] "Failed to get status for pod" podUID="14346ec471451487d5c07e34f8f3457a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: E0312 13:46:42.835044 4125 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.836136 4125 status_manager.go:853] "Failed to get status for pod" podUID="b541f220-6272-4285-8250-4474714fb6cd" pod="openshift-kube-apiserver/installer-11-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-11-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.837626 4125 status_manager.go:853] "Failed to get status for pod" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:42 crc kubenswrapper[4125]: I0312 13:46:42.839493 4125 status_manager.go:853] "Failed to get status for pod" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" pod="openshift-kube-scheduler/revision-pruner-8-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-8-crc\": dial tcp 192.168.130.11:6443: connect: connection refused" Mar 12 13:46:43 crc kubenswrapper[4125]: I0312 13:46:43.841127 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"7241c42ed7ec4ba1a5dbd96e45d6e56c2e226fc4d7c83fe956ef55b82fe7db52"} Mar 12 13:46:43 crc kubenswrapper[4125]: I0312 13:46:43.841484 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"be0bd8583f87c2b717a7f9a0a67a7cda2edafb3ccaf83fe26a917e5da4b57791"} Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.850210 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/kube-controller-manager/0.log" Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.851387 4125 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="402d30aed83909fcb1269d16d81ab5eb3088691295c2863c81ad931be8171c7b" exitCode=1 Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.851529 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"402d30aed83909fcb1269d16d81ab5eb3088691295c2863c81ad931be8171c7b"} Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.852174 4125 scope.go:117] "RemoveContainer" containerID="402d30aed83909fcb1269d16d81ab5eb3088691295c2863c81ad931be8171c7b" Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.858842 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"97dba7a7dbdb88773afa6686e1de78eb07f90fabb11f2e0dbdbb993b4863b60a"} Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.859182 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"6b02cb4769c4910eecdf3ec4812248018cf15076e879cee5e8fb5b8fede51804"} Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.859322 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"82bebccd1422082906a4c3620d06521aa301beaba426a904df8a7cc0dffbf78b"} Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.859450 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:44 crc kubenswrapper[4125]: I0312 13:46:44.859491 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:45 crc kubenswrapper[4125]: I0312 13:46:45.869682 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/kube-controller-manager/0.log" Mar 12 13:46:45 crc kubenswrapper[4125]: I0312 13:46:45.869864 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"7eda087178c2e01694b120a44854784995d1143fa5782ff5c1fcbb5392cc15d7"} Mar 12 13:46:47 crc kubenswrapper[4125]: I0312 13:46:47.009527 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc" Mar 12 13:46:47 crc kubenswrapper[4125]: I0312 13:46:47.082553 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:47 crc kubenswrapper[4125]: I0312 13:46:47.083126 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:47 crc kubenswrapper[4125]: I0312 13:46:47.088036 4125 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]log ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]etcd ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-kube-apiserver-admission-initializer ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-api-request-count-filter ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-startkubeinformers ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/priority-and-fairness-config-consumer ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/priority-and-fairness-filter ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-apiextensions-informers ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-apiextensions-controllers ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/crd-informer-synced ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-service-ip-repair-controllers ok Mar 12 13:46:47 crc kubenswrapper[4125]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Mar 12 13:46:47 crc kubenswrapper[4125]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/priority-and-fairness-config-producer ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-system-namespaces-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/bootstrap-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-cluster-authentication-info-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-legacy-token-tracking-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/start-kube-aggregator-informers ok Mar 12 13:46:47 crc kubenswrapper[4125]: 
[+]poststarthook/apiservice-registration-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/apiservice-status-available-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/apiservice-wait-for-first-sync ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/kube-apiserver-autoregistration ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]autoregister-completion ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/apiservice-openapi-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/apiservice-openapiv3-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: [+]poststarthook/apiservice-discovery-controller ok Mar 12 13:46:47 crc kubenswrapper[4125]: healthz check failed Mar 12 13:46:47 crc kubenswrapper[4125]: I0312 13:46:47.088139 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:46:49 crc kubenswrapper[4125]: I0312 13:46:49.161354 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:46:49 crc kubenswrapper[4125]: I0312 13:46:49.161704 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:46:49 crc kubenswrapper[4125]: I0312 13:46:49.171158 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:46:51 crc kubenswrapper[4125]: I0312 13:46:51.216569 4125 kubelet.go:1922] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:51 crc kubenswrapper[4125]: I0312 13:46:51.249510 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="a3f6a3e226d5c60ea73cb7fac85e9195" podUID="0a37233d-5d3b-4938-8c39-90601053ac58" Mar 12 13:46:51 crc kubenswrapper[4125]: I0312 13:46:51.906368 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:46:51 crc kubenswrapper[4125]: I0312 13:46:51.906406 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:51 crc kubenswrapper[4125]: I0312 13:46:51.907125 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.038700 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"installer-8-crc\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") " pod="openshift-kube-scheduler/installer-8-crc" Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.064356 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="a3f6a3e226d5c60ea73cb7fac85e9195" podUID="0a37233d-5d3b-4938-8c39-90601053ac58" Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.296974 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/installer-8-crc" Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.923065 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.923544 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="315bd398-2bab-4e78-95c8-f14da5849566" Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.923730 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-8-crc" event={"ID":"252d78ec-e97f-4fdb-9104-4464f1cb6172","Type":"ContainerStarted","Data":"5c8422a73efefb91c5e471931019517be400c52f37592e3a581e057ec8cf2187"} Mar 12 13:46:52 crc kubenswrapper[4125]: I0312 13:46:52.928595 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="a3f6a3e226d5c60ea73cb7fac85e9195" podUID="0a37233d-5d3b-4938-8c39-90601053ac58" Mar 12 13:46:53 crc kubenswrapper[4125]: I0312 13:46:53.935223 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-8-crc" event={"ID":"252d78ec-e97f-4fdb-9104-4464f1cb6172","Type":"ContainerStarted","Data":"3153454057d8b849ab1157a13b0bab70a8888ba4da37784c7cdc7b5dff6efb75"} Mar 12 13:46:55 crc kubenswrapper[4125]: I0312 13:46:55.252183 4125 scope.go:117] "RemoveContainer" containerID="73f8d09ffdbe95cd4b4e9637ae4f83595b1559bb47723e7d8a89d4b166518e48" Mar 12 13:46:59 crc kubenswrapper[4125]: I0312 13:46:59.181281 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 12 13:47:01 crc kubenswrapper[4125]: I0312 13:47:01.950921 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Mar 12 13:47:02 crc kubenswrapper[4125]: I0312 13:47:02.655221 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Mar 12 13:47:02 crc kubenswrapper[4125]: I0312 13:47:02.802154 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.054743 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-79vsd" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.241870 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.404879 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.413729 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.468921 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.623699 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.684912 4125 reflector.go:351] Caches populated for 
*v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.703613 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.797227 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Mar 12 13:47:03 crc kubenswrapper[4125]: I0312 13:47:03.911382 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.006918 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.144385 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.163104 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.509048 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.828917 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.863449 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.905317 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.908186 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Mar 12 13:47:04 crc kubenswrapper[4125]: I0312 13:47:04.943585 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.105561 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.161561 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.190949 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.208501 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.229721 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.240666 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.365939 4125 reflector.go:351] Caches populated for 
*v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.470508 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.471767 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.518662 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.586492 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.829124 4125 reflector.go:351] Caches populated for *v1.CSIDriver from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.858016 4125 reflector.go:351] Caches populated for *v1.RuntimeClass from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:47:05 crc kubenswrapper[4125]: I0312 13:47:05.887983 4125 reflector.go:351] Caches populated for *v1.Service from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.021729 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.033899 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.082584 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.112499 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.154735 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.240228 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.389626 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.402349 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.546423 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.699929 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.815224 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.824945 4125 reflector.go:351] 
Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Mar 12 13:47:06 crc kubenswrapper[4125]: I0312 13:47:06.828490 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.045340 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.181316 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.185062 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.221522 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.262764 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.320705 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.449518 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.634538 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Mar 12 13:47:07 crc kubenswrapper[4125]: I0312 13:47:07.835427 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.084509 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.188396 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.326188 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.331462 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.440974 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.544228 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.564736 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.634138 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.649474 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Mar 12 13:47:08 crc 
kubenswrapper[4125]: I0312 13:47:08.714755 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.774796 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.851524 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Mar 12 13:47:08 crc kubenswrapper[4125]: I0312 13:47:08.890975 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.057127 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.061010 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.084625 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.089486 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.090456 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.138614 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.195199 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.196316 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.273942 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.289226 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.337423 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.341306 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.357595 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.379867 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.412295 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Mar 12 13:47:09 crc 
kubenswrapper[4125]: I0312 13:47:09.457095 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.587617 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.612547 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.726907 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.762488 4125 reflector.go:351] Caches populated for *v1.Pod from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.766087 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=38.766008161 podStartE2EDuration="38.766008161s" podCreationTimestamp="2026-03-12 13:46:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:46:51.332533594 +0000 UTC m=+1581.655919663" watchObservedRunningTime="2026-03-12 13:47:09.766008161 +0000 UTC m=+1600.089394550" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.770370 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-scheduler/installer-8-crc" podStartSLOduration=39.770326086 podStartE2EDuration="39.770326086s" podCreationTimestamp="2026-03-12 13:46:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:46:53.967488589 +0000 UTC m=+1584.290875838" watchObservedRunningTime="2026-03-12 13:47:09.770326086 +0000 UTC m=+1600.093712625" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.772125 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.772181 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.772219 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/installer-8-crc"] Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.788956 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 12 13:47:09 crc kubenswrapper[4125]: I0312 13:47:09.833338 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=18.833238797 podStartE2EDuration="18.833238797s" podCreationTimestamp="2026-03-12 13:46:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:47:09.824189474 +0000 UTC m=+1600.147575723" watchObservedRunningTime="2026-03-12 13:47:09.833238797 +0000 UTC m=+1600.156624876" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.059106 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Mar 12 13:47:10 crc kubenswrapper[4125]: 
I0312 13:47:10.084308 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.102216 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.193366 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.231102 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.305704 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-58g82" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.354934 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.424179 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.451758 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.462283 4125 reflector.go:351] Caches populated for *v1.Node from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.587421 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.684307 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.716268 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.797131 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.862230 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.906502 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Mar 12 13:47:10 crc kubenswrapper[4125]: I0312 13:47:10.977574 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.125236 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.146610 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.222332 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.342480 4125 reflector.go:351] Caches populated for 
*v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.343683 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler"/"kube-root-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.413393 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.418719 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.445582 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.449005 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.507729 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.532699 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.556670 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.626506 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.684689 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.810081 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.931127 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.936098 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Mar 12 13:47:11 crc kubenswrapper[4125]: I0312 13:47:11.953386 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.007907 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.090406 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.091004 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.095772 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.281994 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.303384 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.342386 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.367721 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.483625 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.596397 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-kpdvz"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.597372 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.607993 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.740807 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.767570 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Mar 12 13:47:12 crc kubenswrapper[4125]: I0312 13:47:12.836161 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-r9fjc"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.031089 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.031785 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="14346ec471451487d5c07e34f8f3457a" containerName="startup-monitor" containerID="cri-o://a97fe34c547aa390f9726ce3f0a2b5db767da23688258863a580017da9eab09f" gracePeriod=5
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.041503 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.050530 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.110156 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.121465 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.132952 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"webhook-serving-cert"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.345743 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.362014 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.449876 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.495605 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.550442 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.567568 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.666137 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.666992 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.743384 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.873403 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Mar 12 13:47:13 crc kubenswrapper[4125]: I0312 13:47:13.893580 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.034036 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.191075 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.285346 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.492457 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.580480 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.638110 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.760330 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.787174 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.802208 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-6sd5l"
Mar 12 13:47:14 crc kubenswrapper[4125]: I0312 13:47:14.912021 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-q786x"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.061462 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.062501 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.068025 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.089113 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.125370 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.258343 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.289922 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.380642 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.435873 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.540124 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.556117 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.593384 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.706175 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.876412 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.886322 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Mar 12 13:47:15 crc kubenswrapper[4125]: I0312 13:47:15.922577 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.019961 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.087702 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.091234 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.158022 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.192116 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.217930 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.238944 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-dwn4s"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.378459 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.383725 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.617582 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.666296 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler"/"installer-sa-dockercfg-9ln8g"
Mar 12 13:47:16 crc kubenswrapper[4125]: I0312 13:47:16.939958 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.078778 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-ng44q"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.204019 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.246043 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.263021 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.302421 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.317970 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-twmwc"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.378385 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.455604 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.604560 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Mar 12 13:47:17 crc kubenswrapper[4125]: I0312 13:47:17.671235 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.052324 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.165593 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_14346ec471451487d5c07e34f8f3457a/startup-monitor/0.log"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.165734 4125 generic.go:334] "Generic (PLEG): container finished" podID="14346ec471451487d5c07e34f8f3457a" containerID="a97fe34c547aa390f9726ce3f0a2b5db767da23688258863a580017da9eab09f" exitCode=137
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.186076 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.429766 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.675667 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.710073 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_14346ec471451487d5c07e34f8f3457a/startup-monitor/0.log"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.710237 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.792925 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.881371 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.882971 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-lock\") pod \"14346ec471451487d5c07e34f8f3457a\" (UID: \"14346ec471451487d5c07e34f8f3457a\") "
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.883384 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-lock" (OuterVolumeSpecName: "var-lock") pod "14346ec471451487d5c07e34f8f3457a" (UID: "14346ec471451487d5c07e34f8f3457a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.883776 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-pod-resource-dir\") pod \"14346ec471451487d5c07e34f8f3457a\" (UID: \"14346ec471451487d5c07e34f8f3457a\") "
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.884232 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-resource-dir\") pod \"14346ec471451487d5c07e34f8f3457a\" (UID: \"14346ec471451487d5c07e34f8f3457a\") "
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.884577 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "14346ec471451487d5c07e34f8f3457a" (UID: "14346ec471451487d5c07e34f8f3457a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.885061 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-log\") pod \"14346ec471451487d5c07e34f8f3457a\" (UID: \"14346ec471451487d5c07e34f8f3457a\") "
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.885480 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-log" (OuterVolumeSpecName: "var-log") pod "14346ec471451487d5c07e34f8f3457a" (UID: "14346ec471451487d5c07e34f8f3457a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.885527 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-manifests\") pod \"14346ec471451487d5c07e34f8f3457a\" (UID: \"14346ec471451487d5c07e34f8f3457a\") "
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.886055 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-manifests" (OuterVolumeSpecName: "manifests") pod "14346ec471451487d5c07e34f8f3457a" (UID: "14346ec471451487d5c07e34f8f3457a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.886355 4125 reconciler_common.go:300] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-log\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.886450 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-var-lock\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.886481 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-resource-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.895566 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "14346ec471451487d5c07e34f8f3457a" (UID: "14346ec471451487d5c07e34f8f3457a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.988481 4125 reconciler_common.go:300] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-manifests\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:18 crc kubenswrapper[4125]: I0312 13:47:18.989200 4125 reconciler_common.go:300] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/14346ec471451487d5c07e34f8f3457a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:19 crc kubenswrapper[4125]: I0312 13:47:19.190686 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_14346ec471451487d5c07e34f8f3457a/startup-monitor/0.log"
Mar 12 13:47:19 crc kubenswrapper[4125]: I0312 13:47:19.190792 4125 scope.go:117] "RemoveContainer" containerID="a97fe34c547aa390f9726ce3f0a2b5db767da23688258863a580017da9eab09f"
Mar 12 13:47:19 crc kubenswrapper[4125]: I0312 13:47:19.191042 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.037222 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14346ec471451487d5c07e34f8f3457a" path="/var/lib/kubelet/pods/14346ec471451487d5c07e34f8f3457a/volumes"
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.038079 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID=""
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.067011 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.067091 4125 kubelet.go:2639] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="6fdfb25e-1492-4ebd-bbee-0d90a5d031d4"
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.074755 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.074974 4125 kubelet.go:2663] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="6fdfb25e-1492-4ebd-bbee-0d90a5d031d4"
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.421742 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-sv888"
Mar 12 13:47:20 crc kubenswrapper[4125]: I0312 13:47:20.495097 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Mar 12 13:47:35 crc kubenswrapper[4125]: I0312 13:47:35.644162 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:47:35 crc kubenswrapper[4125]: I0312 13:47:35.644744 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:47:35 crc kubenswrapper[4125]: I0312 13:47:35.644795 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:47:35 crc kubenswrapper[4125]: I0312 13:47:35.644945 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:47:35 crc kubenswrapper[4125]: I0312 13:47:35.644988 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.384796 4125 scope.go:117] "RemoveContainer" containerID="50bc70f11c788ac2ff82783495b350392a81cc7e98b9f7c462821291b6fdc22d"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.395491 4125 kubelet.go:2439] "SyncLoop REMOVE" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.396087 4125 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.396331 4125 topology_manager.go:215] "Topology Admit Handler" podUID="6a57a7fb1944b43a6bd11a349520d301" podNamespace="openshift-kube-scheduler" podName="openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.396668 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="14346ec471451487d5c07e34f8f3457a" containerName="startup-monitor"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.396790 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="14346ec471451487d5c07e34f8f3457a" containerName="startup-monitor"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.396951 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.397087 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.397190 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="wait-for-host-port"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.397307 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="wait-for-host-port"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.397414 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-recovery-controller"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.397510 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-recovery-controller"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.397594 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b541f220-6272-4285-8250-4474714fb6cd" containerName="installer"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.397695 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="b541f220-6272-4285-8250-4474714fb6cd" containerName="installer"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.397784 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-cert-syncer"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.397929 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-cert-syncer"
Mar 12 13:47:55 crc kubenswrapper[4125]: E0312 13:47:55.398033 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" containerName="pruner"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398112 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" containerName="pruner"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398411 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" containerName="pruner"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398532 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="14346ec471451487d5c07e34f8f3457a" containerName="startup-monitor"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398619 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-recovery-controller"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398707 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398791 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="b541f220-6272-4285-8250-4474714fb6cd" containerName="installer"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398922 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-recovery-controller" containerID="cri-o://1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2" gracePeriod=30
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398939 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-cert-syncer"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398733 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler" containerID="cri-o://525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149" gracePeriod=30
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.398972 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="631cdb37fbb54e809ecc5e719aebd371" containerName="kube-scheduler-cert-syncer" containerID="cri-o://b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc" gracePeriod=30
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.490669 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.490754 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.594231 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.594431 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.594480 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.594525 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.666377 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_631cdb37fbb54e809ecc5e719aebd371/kube-scheduler-cert-syncer/0.log"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.668067 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.672235 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" oldPodUID="631cdb37fbb54e809ecc5e719aebd371" podUID="6a57a7fb1944b43a6bd11a349520d301"
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.797525 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/631cdb37fbb54e809ecc5e719aebd371-resource-dir\") pod \"631cdb37fbb54e809ecc5e719aebd371\" (UID: \"631cdb37fbb54e809ecc5e719aebd371\") "
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.797670 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/631cdb37fbb54e809ecc5e719aebd371-cert-dir\") pod \"631cdb37fbb54e809ecc5e719aebd371\" (UID: \"631cdb37fbb54e809ecc5e719aebd371\") "
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.798235 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/631cdb37fbb54e809ecc5e719aebd371-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "631cdb37fbb54e809ecc5e719aebd371" (UID: "631cdb37fbb54e809ecc5e719aebd371"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.798330 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/631cdb37fbb54e809ecc5e719aebd371-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "631cdb37fbb54e809ecc5e719aebd371" (UID: "631cdb37fbb54e809ecc5e719aebd371"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.899704 4125 reconciler_common.go:300] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/631cdb37fbb54e809ecc5e719aebd371-resource-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:55 crc kubenswrapper[4125]: I0312 13:47:55.899791 4125 reconciler_common.go:300] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/631cdb37fbb54e809ecc5e719aebd371-cert-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.036233 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="631cdb37fbb54e809ecc5e719aebd371" path="/var/lib/kubelet/pods/631cdb37fbb54e809ecc5e719aebd371/volumes"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.566996 4125 generic.go:334] "Generic (PLEG): container finished" podID="252d78ec-e97f-4fdb-9104-4464f1cb6172" containerID="3153454057d8b849ab1157a13b0bab70a8888ba4da37784c7cdc7b5dff6efb75" exitCode=0
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.567100 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-8-crc" event={"ID":"252d78ec-e97f-4fdb-9104-4464f1cb6172","Type":"ContainerDied","Data":"3153454057d8b849ab1157a13b0bab70a8888ba4da37784c7cdc7b5dff6efb75"}
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.571873 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_631cdb37fbb54e809ecc5e719aebd371/kube-scheduler-cert-syncer/0.log"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.573020 4125 generic.go:334] "Generic (PLEG): container finished" podID="631cdb37fbb54e809ecc5e719aebd371" containerID="1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2" exitCode=0
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.573085 4125 generic.go:334] "Generic (PLEG): container finished" podID="631cdb37fbb54e809ecc5e719aebd371" containerID="b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc" exitCode=2
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.573107 4125 generic.go:334] "Generic (PLEG): container finished" podID="631cdb37fbb54e809ecc5e719aebd371" containerID="525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149" exitCode=0
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.573188 4125 scope.go:117] "RemoveContainer" containerID="1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.573428 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.608082 4125 status_manager.go:863] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" oldPodUID="631cdb37fbb54e809ecc5e719aebd371" podUID="6a57a7fb1944b43a6bd11a349520d301"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.616318 4125 scope.go:117] "RemoveContainer" containerID="b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.657724 4125 scope.go:117] "RemoveContainer" containerID="525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.693866 4125 scope.go:117] "RemoveContainer" containerID="e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.732521 4125 scope.go:117] "RemoveContainer" containerID="1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"
Mar 12 13:47:56 crc kubenswrapper[4125]: E0312 13:47:56.733424 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": container with ID starting with 1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2 not found: ID does not exist" containerID="1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.733514 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"} err="failed to get container status \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": rpc error: code = NotFound desc = could not find container \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": container with ID starting with 1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2 not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.733541 4125 scope.go:117] "RemoveContainer" containerID="b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"
Mar 12 13:47:56 crc kubenswrapper[4125]: E0312 13:47:56.735163 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": container with ID starting with b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc not found: ID does not exist" containerID="b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.735231 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"} err="failed to get container status \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": rpc error: code = NotFound desc = could not find container \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": container with ID starting with b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.735269 4125 scope.go:117] "RemoveContainer" containerID="525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"
Mar 12 13:47:56 crc kubenswrapper[4125]: E0312 13:47:56.736139 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": container with ID starting with 525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149 not found: ID does not exist" containerID="525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.736165 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"} err="failed to get container status \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": rpc error: code = NotFound desc = could not find container \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": container with ID starting with 525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149 not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.736176 4125 scope.go:117] "RemoveContainer" containerID="e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"
Mar 12 13:47:56 crc kubenswrapper[4125]: E0312 13:47:56.737906 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": container with ID starting with e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae not found: ID does not exist" containerID="e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.738118 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"} err="failed to get container status \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": rpc error: code = NotFound desc = could not find container \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": container with ID starting with e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.738222 4125 scope.go:117] "RemoveContainer" containerID="1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.739239 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"} err="failed to get container status \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": rpc error: code = NotFound desc = could not find container \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": container with ID starting with 1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2 not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.739329 4125 scope.go:117] "RemoveContainer" containerID="b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.740456 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"} err="failed to get container status \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": rpc error: code = NotFound desc = could not find container \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": container with ID starting with b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.740491 4125 scope.go:117] "RemoveContainer" containerID="525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.741226 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"} err="failed to get container status \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": rpc error: code = NotFound desc = could not find container \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": container with ID starting with 525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149 not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.741350 4125 scope.go:117] "RemoveContainer" containerID="e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.741969 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"} err="failed to get container status \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": rpc error: code = NotFound desc = could not find container \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": container with ID starting with e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.742008 4125 scope.go:117] "RemoveContainer" containerID="1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.742599 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2"} err="failed to get container status \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": rpc error: code = NotFound desc = could not find container \"1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2\": container with ID starting with 1c3d6e213bf4418439ac907a037dc5480e22da0bc3c5ea2cb6705247a54545c2 not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.742700 4125 scope.go:117] "RemoveContainer" containerID="b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.743465 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc"} err="failed to get container status \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": rpc error: code = NotFound desc = could not find container \"b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc\": container with ID starting with b1f807d73bf80beb1059b615a2a8a94c5730afb08fa217c888ca7c9a4f6739bc not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.743499 4125 scope.go:117] "RemoveContainer" containerID="525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.744062 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149"} err="failed to get container status \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": rpc error: code = NotFound desc = could not find container \"525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149\": container with ID starting with 525723b515153b95cf8db9c18b7a472dcd53b425cb7881ecfadcfd6dbf0ca149 not found: ID does not exist"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.744167 4125 scope.go:117] "RemoveContainer" containerID="e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"
Mar 12 13:47:56 crc kubenswrapper[4125]: I0312 13:47:56.744752 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae"} err="failed to get container status \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": rpc error: code = NotFound desc = could not find container \"e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae\": container with ID starting with e2ea043f2a6fb5e1379e625ce01b59e7da7b07eed95c58ec70f658a45aab9dae not found: ID does not exist"
Mar 12 13:47:57 crc kubenswrapper[4125]: I0312 13:47:57.932739 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.036712 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-var-lock\") pod \"252d78ec-e97f-4fdb-9104-4464f1cb6172\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") "
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.036936 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-var-lock" (OuterVolumeSpecName: "var-lock") pod "252d78ec-e97f-4fdb-9104-4464f1cb6172" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.038965 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") pod \"252d78ec-e97f-4fdb-9104-4464f1cb6172\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") "
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.039390 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-kubelet-dir\") pod \"252d78ec-e97f-4fdb-9104-4464f1cb6172\" (UID: \"252d78ec-e97f-4fdb-9104-4464f1cb6172\") "
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.039476 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "252d78ec-e97f-4fdb-9104-4464f1cb6172" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.040072 4125 reconciler_common.go:300] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-var-lock\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.040183 4125 reconciler_common.go:300] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/252d78ec-e97f-4fdb-9104-4464f1cb6172-kubelet-dir\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.048902 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "252d78ec-e97f-4fdb-9104-4464f1cb6172" (UID: "252d78ec-e97f-4fdb-9104-4464f1cb6172"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.142330 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/252d78ec-e97f-4fdb-9104-4464f1cb6172-kube-api-access\") on node \"crc\" DevicePath \"\""
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.598910 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/installer-8-crc" event={"ID":"252d78ec-e97f-4fdb-9104-4464f1cb6172","Type":"ContainerDied","Data":"5c8422a73efefb91c5e471931019517be400c52f37592e3a581e057ec8cf2187"}
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.598965 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c8422a73efefb91c5e471931019517be400c52f37592e3a581e057ec8cf2187"
Mar 12 13:47:58 crc kubenswrapper[4125]: I0312 13:47:58.599024 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-8-crc"
Mar 12 13:47:58 crc kubenswrapper[4125]: E0312 13:47:58.722150 4125 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod252d78ec_e97f_4fdb_9104_4464f1cb6172.slice\": RecentStats: unable to find data in memory cache]"
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.025079 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.055298 4125 kubelet.go:1917] "Trying to delete pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="5e53e26d-e94d-45dc-b706-677ed667c8ce"
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.055369 4125 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="5e53e26d-e94d-45dc-b706-677ed667c8ce"
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.076648 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.095969 4125 kubelet.go:1922] "Deleted mirror pod because it is outdated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.104589 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.178681 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.194761 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Mar 12 13:48:08 crc kubenswrapper[4125]: W0312 13:48:08.228035 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a57a7fb1944b43a6bd11a349520d301.slice/crio-0f1315675d35faf28523e30e8806aca49e25ddc391fda9b15f721a8d19f1334a WatchSource:0}: Error finding container 0f1315675d35faf28523e30e8806aca49e25ddc391fda9b15f721a8d19f1334a: Status 404 returned error can't find the container with id 0f1315675d35faf28523e30e8806aca49e25ddc391fda9b15f721a8d19f1334a
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.699483 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47"}
Mar 12 13:48:08 crc kubenswrapper[4125]: I0312 13:48:08.699554 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"0f1315675d35faf28523e30e8806aca49e25ddc391fda9b15f721a8d19f1334a"}
Mar 12 13:48:35 crc kubenswrapper[4125]: I0312 13:48:35.645436 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:48:35 crc kubenswrapper[4125]: I0312 13:48:35.646019 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:48:35 crc kubenswrapper[4125]: I0312 13:48:35.646070 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:48:35 crc kubenswrapper[4125]: I0312 13:48:35.646090 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:48:35 crc kubenswrapper[4125]: I0312 13:48:35.646132 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Pending"
Mar 12 13:48:38 crc kubenswrapper[4125]: I0312 13:48:38.931120 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_6a57a7fb1944b43a6bd11a349520d301/wait-for-host-port/0.log"
Mar 12 13:48:38 crc kubenswrapper[4125]: I0312 13:48:38.931235 4125 generic.go:334] "Generic (PLEG): container finished" podID="6a57a7fb1944b43a6bd11a349520d301" containerID="627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47" exitCode=124
Mar 12 13:48:38 crc kubenswrapper[4125]: I0312 13:48:38.931322 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerDied","Data":"627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47"}
Mar 12 13:48:39 crc kubenswrapper[4125]: I0312 13:48:39.941085 4125 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_6a57a7fb1944b43a6bd11a349520d301/wait-for-host-port/0.log"
Mar 12 13:48:39 crc kubenswrapper[4125]: I0312 13:48:39.943056 4125 generic.go:334] "Generic (PLEG): container finished" podID="6a57a7fb1944b43a6bd11a349520d301" containerID="2f3429b7143780fa666162f39d3e2b5a960959d699e805bc63c5631a441922ee" exitCode=0
Mar 12 13:48:39 crc kubenswrapper[4125]: I0312 13:48:39.943345 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerDied","Data":"2f3429b7143780fa666162f39d3e2b5a960959d699e805bc63c5631a441922ee"}
Mar 12 13:48:39 crc kubenswrapper[4125]: I0312 13:48:39.943516 4125 scope.go:117] "RemoveContainer" containerID="627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47"
Mar 12 13:48:39 crc kubenswrapper[4125]: I0312 13:48:39.944349 4125 scope.go:117] "RemoveContainer" containerID="627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47"
Mar 12 13:48:40 crc kubenswrapper[4125]: E0312 13:48:40.017592 4125 remote_runtime.go:385] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_wait-for-host-port_openshift-kube-scheduler-crc_openshift-kube-scheduler_6a57a7fb1944b43a6bd11a349520d301_0 in pod sandbox 0f1315675d35faf28523e30e8806aca49e25ddc391fda9b15f721a8d19f1334a from index: no such id: '627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47'" containerID="627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47"
Mar 12 13:48:40 crc kubenswrapper[4125]: E0312 13:48:40.017695 4125 kuberuntime_container.go:858] failed to remove pod init container "wait-for-host-port": rpc error: code = Unknown desc = failed to delete container k8s_wait-for-host-port_openshift-kube-scheduler-crc_openshift-kube-scheduler_6a57a7fb1944b43a6bd11a349520d301_0 in pod sandbox 0f1315675d35faf28523e30e8806aca49e25ddc391fda9b15f721a8d19f1334a from index: no such id: '627b69d873c73ec90067d766c8a777e27b621347442cfcaf92b9740f3bad5a47'; Skipping pod "openshift-kube-scheduler-crc_openshift-kube-scheduler(6a57a7fb1944b43a6bd11a349520d301)"
Mar 12 13:48:40 crc kubenswrapper[4125]: I0312 13:48:40.960381 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"5c18bb3bd9be0d319d737941f9d0021ff6268513489f9e260552e980746dc865"}
Mar 12 13:48:40 crc kubenswrapper[4125]: I0312 13:48:40.960487 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"6edf5b21cd44ba3c1e49b6644bd9c924d52c9356830268bce82eda820db4e9e2"}
Mar 12 13:48:41 crc kubenswrapper[4125]: I0312 13:48:41.969692 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"ee64f17e9575a578a8168a324c487f7fec6c2517167fd1faaf295d6e48b6ceb9"}
Mar 12 13:48:41 crc kubenswrapper[4125]: I0312 13:48:41.971186 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:48:42 crc kubenswrapper[4125]: I0312 13:48:42.059899 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=34.059850119 podStartE2EDuration="34.059850119s" podCreationTimestamp="2026-03-12 13:48:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 13:48:42.056590928 +0000 UTC m=+1692.379976847" watchObservedRunningTime="2026-03-12 13:48:42.059850119 +0000 UTC m=+1692.383236068"
Mar 12 13:49:28 crc kubenswrapper[4125]: I0312 13:49:28.193417 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 12 13:49:35 crc kubenswrapper[4125]: I0312 13:49:35.647074 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:49:35 crc kubenswrapper[4125]: I0312 13:49:35.648966 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:49:35 crc kubenswrapper[4125]: I0312 13:49:35.649132 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:49:35 crc kubenswrapper[4125]: I0312 13:49:35.649302 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:49:35 crc kubenswrapper[4125]: I0312 13:49:35.649535 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:50:35 crc kubenswrapper[4125]: I0312 13:50:35.650964 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:50:35 crc kubenswrapper[4125]: I0312 13:50:35.651556 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:50:35 crc kubenswrapper[4125]: I0312 13:50:35.651604 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:50:35 crc kubenswrapper[4125]: I0312 13:50:35.651650 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:50:35 crc kubenswrapper[4125]: I0312 13:50:35.651679 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:51:35 crc kubenswrapper[4125]: I0312 13:51:35.653029 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:51:35 crc kubenswrapper[4125]: I0312 13:51:35.653649 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:51:35 crc kubenswrapper[4125]: I0312 13:51:35.653674 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:51:35 crc kubenswrapper[4125]: I0312 13:51:35.653726 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:51:35 crc kubenswrapper[4125]: I0312 13:51:35.653751 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.537934 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fph2r"]
Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.540594 4125 topology_manager.go:215] "Topology Admit Handler" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" podNamespace="openshift-marketplace" podName="redhat-marketplace-fph2r"
Mar 12 13:52:08 crc kubenswrapper[4125]: E0312 13:52:08.541119 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="252d78ec-e97f-4fdb-9104-4464f1cb6172" containerName="installer"
Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.541357 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="252d78ec-e97f-4fdb-9104-4464f1cb6172" containerName="installer"
Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.541706 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="252d78ec-e97f-4fdb-9104-4464f1cb6172" containerName="installer"
Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.543306 4125 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.575546 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fph2r"] Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.605631 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-catalog-content\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.605741 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-utilities\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.605772 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l5hn\" (UniqueName: \"kubernetes.io/projected/bbabd08e-459e-499d-9883-ea17b7c9a8db-kube-api-access-7l5hn\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.707484 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-catalog-content\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.707674 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-utilities\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.707717 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-7l5hn\" (UniqueName: \"kubernetes.io/projected/bbabd08e-459e-499d-9883-ea17b7c9a8db-kube-api-access-7l5hn\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.708215 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-catalog-content\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.708645 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-utilities\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.735981 4125 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7l5hn\" (UniqueName: \"kubernetes.io/projected/bbabd08e-459e-499d-9883-ea17b7c9a8db-kube-api-access-7l5hn\") pod \"redhat-marketplace-fph2r\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:08 crc kubenswrapper[4125]: I0312 13:52:08.875483 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:09 crc kubenswrapper[4125]: I0312 13:52:09.297587 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fph2r"] Mar 12 13:52:09 crc kubenswrapper[4125]: I0312 13:52:09.994040 4125 generic.go:334] "Generic (PLEG): container finished" podID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerID="c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d" exitCode=0 Mar 12 13:52:09 crc kubenswrapper[4125]: I0312 13:52:09.994387 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerDied","Data":"c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d"} Mar 12 13:52:09 crc kubenswrapper[4125]: I0312 13:52:09.994430 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerStarted","Data":"26f530bfeaa3e4abb4fd1aa3fd1ea3a6c1022626e1015f31e154702555a927a5"} Mar 12 13:52:10 crc kubenswrapper[4125]: I0312 13:52:10.004763 4125 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 12 13:52:11 crc kubenswrapper[4125]: I0312 13:52:11.003250 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerStarted","Data":"fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041"} Mar 12 13:52:20 crc kubenswrapper[4125]: I0312 13:52:20.086364 4125 generic.go:334] "Generic (PLEG): container finished" podID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerID="fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041" exitCode=0 Mar 12 13:52:20 crc kubenswrapper[4125]: I0312 13:52:20.087028 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerDied","Data":"fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041"} Mar 12 13:52:22 crc kubenswrapper[4125]: I0312 13:52:22.111548 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerStarted","Data":"d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb"} Mar 12 13:52:22 crc kubenswrapper[4125]: I0312 13:52:22.182307 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fph2r" podStartSLOduration=3.555215365 podStartE2EDuration="14.182154537s" podCreationTimestamp="2026-03-12 13:52:08 +0000 UTC" firstStartedPulling="2026-03-12 13:52:09.996203059 +0000 UTC m=+1900.319588938" lastFinishedPulling="2026-03-12 13:52:20.623141801 +0000 UTC m=+1910.946528110" observedRunningTime="2026-03-12 13:52:22.174781691 +0000 UTC m=+1912.498168280" watchObservedRunningTime="2026-03-12 13:52:22.182154537 +0000 UTC 
m=+1912.505540946" Mar 12 13:52:28 crc kubenswrapper[4125]: I0312 13:52:28.877042 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:28 crc kubenswrapper[4125]: I0312 13:52:28.878096 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:29 crc kubenswrapper[4125]: I0312 13:52:29.050574 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:29 crc kubenswrapper[4125]: I0312 13:52:29.273940 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:29 crc kubenswrapper[4125]: I0312 13:52:29.534692 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fph2r"] Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.191264 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fph2r" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="registry-server" containerID="cri-o://d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb" gracePeriod=2 Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.669080 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.768493 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l5hn\" (UniqueName: \"kubernetes.io/projected/bbabd08e-459e-499d-9883-ea17b7c9a8db-kube-api-access-7l5hn\") pod \"bbabd08e-459e-499d-9883-ea17b7c9a8db\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.769481 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-utilities\") pod \"bbabd08e-459e-499d-9883-ea17b7c9a8db\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.770444 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-catalog-content\") pod \"bbabd08e-459e-499d-9883-ea17b7c9a8db\" (UID: \"bbabd08e-459e-499d-9883-ea17b7c9a8db\") " Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.770436 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-utilities" (OuterVolumeSpecName: "utilities") pod "bbabd08e-459e-499d-9883-ea17b7c9a8db" (UID: "bbabd08e-459e-499d-9883-ea17b7c9a8db"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.771788 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.789345 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbabd08e-459e-499d-9883-ea17b7c9a8db-kube-api-access-7l5hn" (OuterVolumeSpecName: "kube-api-access-7l5hn") pod "bbabd08e-459e-499d-9883-ea17b7c9a8db" (UID: "bbabd08e-459e-499d-9883-ea17b7c9a8db"). InnerVolumeSpecName "kube-api-access-7l5hn". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.873018 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-7l5hn\" (UniqueName: \"kubernetes.io/projected/bbabd08e-459e-499d-9883-ea17b7c9a8db-kube-api-access-7l5hn\") on node \"crc\" DevicePath \"\"" Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.955101 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bbabd08e-459e-499d-9883-ea17b7c9a8db" (UID: "bbabd08e-459e-499d-9883-ea17b7c9a8db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 13:52:31 crc kubenswrapper[4125]: I0312 13:52:31.975182 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbabd08e-459e-499d-9883-ea17b7c9a8db-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.210488 4125 generic.go:334] "Generic (PLEG): container finished" podID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerID="d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb" exitCode=0 Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.211009 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerDied","Data":"d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb"} Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.211771 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fph2r" event={"ID":"bbabd08e-459e-499d-9883-ea17b7c9a8db","Type":"ContainerDied","Data":"26f530bfeaa3e4abb4fd1aa3fd1ea3a6c1022626e1015f31e154702555a927a5"} Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.212083 4125 scope.go:117] "RemoveContainer" containerID="d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.211138 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fph2r" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.279951 4125 scope.go:117] "RemoveContainer" containerID="fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.340434 4125 scope.go:117] "RemoveContainer" containerID="c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.378461 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fph2r"] Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.394995 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fph2r"] Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.428166 4125 scope.go:117] "RemoveContainer" containerID="d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb" Mar 12 13:52:32 crc kubenswrapper[4125]: E0312 13:52:32.429719 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb\": container with ID starting with d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb not found: ID does not exist" containerID="d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.429928 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb"} err="failed to get container status \"d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb\": rpc error: code = NotFound desc = could not find container \"d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb\": container with ID starting with d7bf3ec41f3b1781f7c280a71a05d8bbd02021994eab64e7778ce57ddc36fffb not found: ID does not exist" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.430006 4125 scope.go:117] "RemoveContainer" containerID="fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041" Mar 12 13:52:32 crc kubenswrapper[4125]: E0312 13:52:32.430706 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041\": container with ID starting with fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041 not found: ID does not exist" containerID="fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.430888 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041"} err="failed to get container status \"fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041\": rpc error: code = NotFound desc = could not find container \"fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041\": container with ID starting with fd1845e5ca9151fe3e3fcd1c70bc43e147b858621f7ada400e4491320e8da041 not found: ID does not exist" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.430918 4125 scope.go:117] "RemoveContainer" containerID="c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d" Mar 12 13:52:32 crc kubenswrapper[4125]: E0312 13:52:32.431644 4125 remote_runtime.go:432] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d\": container with ID starting with c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d not found: ID does not exist" containerID="c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d" Mar 12 13:52:32 crc kubenswrapper[4125]: I0312 13:52:32.431699 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d"} err="failed to get container status \"c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d\": rpc error: code = NotFound desc = could not find container \"c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d\": container with ID starting with c5451be4455e776c2c4f94a23add9e8522ec78cea0bdd3aa4435d86fd1a29f2d not found: ID does not exist" Mar 12 13:52:34 crc kubenswrapper[4125]: I0312 13:52:34.035263 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" path="/var/lib/kubelet/pods/bbabd08e-459e-499d-9883-ea17b7c9a8db/volumes" Mar 12 13:52:36 crc kubenswrapper[4125]: I0312 13:52:36.172961 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:52:36 crc kubenswrapper[4125]: I0312 13:52:36.175666 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:52:36 crc kubenswrapper[4125]: I0312 13:52:36.176074 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:52:36 crc kubenswrapper[4125]: I0312 13:52:36.176173 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:52:36 crc kubenswrapper[4125]: I0312 13:52:36.176214 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.762301 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lhkzs"] Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.763106 4125 topology_manager.go:215] "Topology Admit Handler" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" podNamespace="openshift-marketplace" podName="community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: E0312 13:52:41.763611 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="registry-server" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.763660 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="registry-server" Mar 12 13:52:41 crc kubenswrapper[4125]: E0312 13:52:41.763694 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="extract-content" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.763709 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="extract-content" Mar 12 13:52:41 crc kubenswrapper[4125]: E0312 13:52:41.763748 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" 
containerName="extract-utilities" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.763763 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="extract-utilities" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.764122 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbabd08e-459e-499d-9883-ea17b7c9a8db" containerName="registry-server" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.766272 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.808003 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-utilities\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.808197 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-catalog-content\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.808366 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv8s5\" (UniqueName: \"kubernetes.io/projected/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-kube-api-access-jv8s5\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.823649 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lhkzs"] Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.909158 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-utilities\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.909263 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-catalog-content\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.909332 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-jv8s5\" (UniqueName: \"kubernetes.io/projected/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-kube-api-access-jv8s5\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.909995 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-catalog-content\") pod \"community-operators-lhkzs\" (UID: 
\"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.910334 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-utilities\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:41 crc kubenswrapper[4125]: I0312 13:52:41.952122 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv8s5\" (UniqueName: \"kubernetes.io/projected/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-kube-api-access-jv8s5\") pod \"community-operators-lhkzs\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") " pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:42 crc kubenswrapper[4125]: I0312 13:52:42.093997 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:52:42 crc kubenswrapper[4125]: W0312 13:52:42.474696 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod557b5f73_6b8e_42cc_87a8_b2b6ded176ef.slice/crio-10a9ed468e9b762f8e2b9e8bd6646d285447a0b90a7768485c4cf82bfbcd4302 WatchSource:0}: Error finding container 10a9ed468e9b762f8e2b9e8bd6646d285447a0b90a7768485c4cf82bfbcd4302: Status 404 returned error can't find the container with id 10a9ed468e9b762f8e2b9e8bd6646d285447a0b90a7768485c4cf82bfbcd4302 Mar 12 13:52:42 crc kubenswrapper[4125]: I0312 13:52:42.475108 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lhkzs"] Mar 12 13:52:43 crc kubenswrapper[4125]: I0312 13:52:43.346861 4125 generic.go:334] "Generic (PLEG): container finished" podID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerID="1cfa14150a7769ea4031b0066ce21c43a2c9e8f49ecf96890443a01fb0d5a5a1" exitCode=0 Mar 12 13:52:43 crc kubenswrapper[4125]: I0312 13:52:43.347881 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerDied","Data":"1cfa14150a7769ea4031b0066ce21c43a2c9e8f49ecf96890443a01fb0d5a5a1"} Mar 12 13:52:43 crc kubenswrapper[4125]: I0312 13:52:43.348392 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerStarted","Data":"10a9ed468e9b762f8e2b9e8bd6646d285447a0b90a7768485c4cf82bfbcd4302"} Mar 12 13:52:44 crc kubenswrapper[4125]: I0312 13:52:44.357702 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerStarted","Data":"8902a80dc6c1b44afe081b6bf70f980fc709638e25089b155d5a17acf2becf78"} Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.004687 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-x9zfr"] Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.006991 4125 topology_manager.go:215] "Topology Admit Handler" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" podNamespace="openshift-marketplace" podName="redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.009110 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.076694 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x9zfr"] Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.136551 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvlxl\" (UniqueName: \"kubernetes.io/projected/f2644f3f-05ed-4871-9be0-9258d5745cf2-kube-api-access-bvlxl\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.136629 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-catalog-content\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.136784 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-utilities\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.238196 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-utilities\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.238728 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bvlxl\" (UniqueName: \"kubernetes.io/projected/f2644f3f-05ed-4871-9be0-9258d5745cf2-kube-api-access-bvlxl\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.238942 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-utilities\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.239156 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-catalog-content\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.239479 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-catalog-content\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.263066 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bvlxl\" (UniqueName: \"kubernetes.io/projected/f2644f3f-05ed-4871-9be0-9258d5745cf2-kube-api-access-bvlxl\") pod \"redhat-operators-x9zfr\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") " pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.342285 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x9zfr" Mar 12 13:52:57 crc kubenswrapper[4125]: I0312 13:52:57.723543 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x9zfr"] Mar 12 13:52:58 crc kubenswrapper[4125]: I0312 13:52:58.878755 4125 generic.go:334] "Generic (PLEG): container finished" podID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerID="ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7" exitCode=0 Mar 12 13:52:58 crc kubenswrapper[4125]: I0312 13:52:58.879085 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerDied","Data":"ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7"} Mar 12 13:52:58 crc kubenswrapper[4125]: I0312 13:52:58.879427 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerStarted","Data":"f86863f8c9470b72e10e441f60dc49d487d93e0c008dd712c07014aae3c6a61a"} Mar 12 13:53:02 crc kubenswrapper[4125]: I0312 13:53:02.009642 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerStarted","Data":"d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46"} Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.247121 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bm6vf"] Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.249637 4125 topology_manager.go:215] "Topology Admit Handler" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" podNamespace="openshift-marketplace" podName="certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.251978 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.432591 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bm6vf"] Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.452539 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-catalog-content\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.452625 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-utilities\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.452690 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tbj9\" (UniqueName: \"kubernetes.io/projected/516fe749-2e84-4f55-893f-0eae08d19f80-kube-api-access-2tbj9\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.554574 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2tbj9\" (UniqueName: \"kubernetes.io/projected/516fe749-2e84-4f55-893f-0eae08d19f80-kube-api-access-2tbj9\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.555595 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-catalog-content\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.555867 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-utilities\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.556199 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-catalog-content\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.556436 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-utilities\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.585474 4125 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2tbj9\" (UniqueName: \"kubernetes.io/projected/516fe749-2e84-4f55-893f-0eae08d19f80-kube-api-access-2tbj9\") pod \"certified-operators-bm6vf\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") " pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:12 crc kubenswrapper[4125]: I0312 13:53:12.876482 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bm6vf" Mar 12 13:53:13 crc kubenswrapper[4125]: I0312 13:53:13.552715 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bm6vf"] Mar 12 13:53:13 crc kubenswrapper[4125]: W0312 13:53:13.572789 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod516fe749_2e84_4f55_893f_0eae08d19f80.slice/crio-245a7cd63c58148fb247d84d7f3c130f64953916ee19293100ca5a0d96fc8ed5 WatchSource:0}: Error finding container 245a7cd63c58148fb247d84d7f3c130f64953916ee19293100ca5a0d96fc8ed5: Status 404 returned error can't find the container with id 245a7cd63c58148fb247d84d7f3c130f64953916ee19293100ca5a0d96fc8ed5 Mar 12 13:53:14 crc kubenswrapper[4125]: I0312 13:53:14.110077 4125 generic.go:334] "Generic (PLEG): container finished" podID="516fe749-2e84-4f55-893f-0eae08d19f80" containerID="fc98f95a37ddcf11343bdcd8300587daa73bb8c75d508de2408b43ce890b711e" exitCode=0 Mar 12 13:53:14 crc kubenswrapper[4125]: I0312 13:53:14.110216 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerDied","Data":"fc98f95a37ddcf11343bdcd8300587daa73bb8c75d508de2408b43ce890b711e"} Mar 12 13:53:14 crc kubenswrapper[4125]: I0312 13:53:14.110292 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerStarted","Data":"245a7cd63c58148fb247d84d7f3c130f64953916ee19293100ca5a0d96fc8ed5"} Mar 12 13:53:16 crc kubenswrapper[4125]: I0312 13:53:16.146900 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerStarted","Data":"e91cfc6aaee95b21d35490b2818f0759c54b6f5d1cefdaf516b657b3d0ec19aa"} Mar 12 13:53:22 crc kubenswrapper[4125]: I0312 13:53:22.195573 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerDied","Data":"8902a80dc6c1b44afe081b6bf70f980fc709638e25089b155d5a17acf2becf78"} Mar 12 13:53:22 crc kubenswrapper[4125]: I0312 13:53:22.195587 4125 generic.go:334] "Generic (PLEG): container finished" podID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerID="8902a80dc6c1b44afe081b6bf70f980fc709638e25089b155d5a17acf2becf78" exitCode=0 Mar 12 13:53:25 crc kubenswrapper[4125]: I0312 13:53:25.445410 4125 patch_prober.go:28] interesting pod/apiserver-69c565c9b6-vbdpd container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]log ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]etcd ok Mar 12 13:53:25 crc kubenswrapper[4125]: [-]etcd-readiness failed: reason withheld Mar 12 13:53:25 crc kubenswrapper[4125]: [+]informer-sync 
ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-StartUserInformer ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-StartOAuthInformer ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Mar 12 13:53:25 crc kubenswrapper[4125]: [+]shutdown ok Mar 12 13:53:25 crc kubenswrapper[4125]: readyz check failed Mar 12 13:53:25 crc kubenswrapper[4125]: I0312 13:53:25.445666 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 12 13:53:25 crc kubenswrapper[4125]: I0312 13:53:25.446046 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:53:26 crc kubenswrapper[4125]: I0312 13:53:26.263715 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerStarted","Data":"0f28aa6121420d893fabbbe6739a89ecbba9850633f989908946a7740ac42bb9"} Mar 12 13:53:27 crc kubenswrapper[4125]: I0312 13:53:27.566435 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 12 13:53:27 crc kubenswrapper[4125]: I0312 13:53:27.929257 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lhkzs" podStartSLOduration=7.456328798 podStartE2EDuration="46.929167222s" podCreationTimestamp="2026-03-12 13:52:41 +0000 UTC" firstStartedPulling="2026-03-12 13:52:43.349866829 +0000 UTC m=+1933.673252598" lastFinishedPulling="2026-03-12 13:53:22.822705083 +0000 UTC m=+1973.146091022" observedRunningTime="2026-03-12 13:53:27.925135362 +0000 UTC m=+1978.248521531" watchObservedRunningTime="2026-03-12 13:53:27.929167222 +0000 UTC m=+1978.252553131" Mar 12 13:53:32 crc kubenswrapper[4125]: I0312 13:53:32.094761 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:53:32 crc kubenswrapper[4125]: I0312 13:53:32.095116 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lhkzs" Mar 12 13:53:33 crc kubenswrapper[4125]: I0312 13:53:33.280389 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-lhkzs" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server" probeResult="failure" output=< Mar 12 13:53:33 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:53:33 crc kubenswrapper[4125]: > Mar 12 13:53:36 crc kubenswrapper[4125]: I0312 13:53:36.177611 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 13:53:36 crc kubenswrapper[4125]: I0312 13:53:36.177763 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 13:53:36 
crc kubenswrapper[4125]: I0312 13:53:36.177871 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 13:53:36 crc kubenswrapper[4125]: I0312 13:53:36.177964 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 13:53:36 crc kubenswrapper[4125]: I0312 13:53:36.178000 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 13:53:39 crc kubenswrapper[4125]: I0312 13:53:39.186208 4125 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:53:39 crc kubenswrapper[4125]: I0312 13:53:39.186374 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="6a57a7fb1944b43a6bd11a349520d301" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:53:39 crc kubenswrapper[4125]: I0312 13:53:39.191212 4125 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 12 13:53:39 crc kubenswrapper[4125]: I0312 13:53:39.191301 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="6a57a7fb1944b43a6bd11a349520d301" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 12 13:53:43 crc kubenswrapper[4125]: I0312 13:53:43.387605 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-lhkzs" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server" probeResult="failure" output=< Mar 12 13:53:43 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 13:53:43 crc kubenswrapper[4125]: > Mar 12 13:53:50 crc kubenswrapper[4125]: I0312 13:53:50.613297 4125 generic.go:334] "Generic (PLEG): container finished" podID="516fe749-2e84-4f55-893f-0eae08d19f80" containerID="e91cfc6aaee95b21d35490b2818f0759c54b6f5d1cefdaf516b657b3d0ec19aa" exitCode=0 Mar 12 13:53:50 crc kubenswrapper[4125]: I0312 13:53:50.614021 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerDied","Data":"e91cfc6aaee95b21d35490b2818f0759c54b6f5d1cefdaf516b657b3d0ec19aa"} Mar 12 13:53:53 crc kubenswrapper[4125]: I0312 13:53:53.352089 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-lhkzs" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server" probeResult="failure" output=< Mar 12 13:53:53 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" 
within 1s
Mar 12 13:53:53 crc kubenswrapper[4125]: >
Mar 12 13:53:53 crc kubenswrapper[4125]: I0312 13:53:53.653482 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerStarted","Data":"648cee218f36306ad14cea2950fb1ab88cef4f9ecaedb94b6b3f734215ed161e"}
Mar 12 13:53:56 crc kubenswrapper[4125]: I0312 13:53:56.790519 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bm6vf" podStartSLOduration=7.646465067 podStartE2EDuration="44.790452438s" podCreationTimestamp="2026-03-12 13:53:12 +0000 UTC" firstStartedPulling="2026-03-12 13:53:14.119085803 +0000 UTC m=+1964.442471562" lastFinishedPulling="2026-03-12 13:53:51.263072994 +0000 UTC m=+2001.586458933" observedRunningTime="2026-03-12 13:53:56.787875942 +0000 UTC m=+2007.111261901" watchObservedRunningTime="2026-03-12 13:53:56.790452438 +0000 UTC m=+2007.113838467"
Mar 12 13:54:02 crc kubenswrapper[4125]: I0312 13:54:02.877614 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bm6vf"
Mar 12 13:54:02 crc kubenswrapper[4125]: I0312 13:54:02.878089 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bm6vf"
Mar 12 13:54:03 crc kubenswrapper[4125]: I0312 13:54:03.256040 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-lhkzs" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:54:03 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:54:03 crc kubenswrapper[4125]: >
Mar 12 13:54:04 crc kubenswrapper[4125]: I0312 13:54:04.045603 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-bm6vf" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:54:04 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:54:04 crc kubenswrapper[4125]: >
Mar 12 13:54:12 crc kubenswrapper[4125]: I0312 13:54:12.226313 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lhkzs"
Mar 12 13:54:12 crc kubenswrapper[4125]: I0312 13:54:12.360030 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lhkzs"
Mar 12 13:54:12 crc kubenswrapper[4125]: I0312 13:54:12.903712 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lhkzs"]
Mar 12 13:54:13 crc kubenswrapper[4125]: I0312 13:54:13.021495 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bm6vf"
Mar 12 13:54:13 crc kubenswrapper[4125]: I0312 13:54:13.159550 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bm6vf"
Mar 12 13:54:13 crc kubenswrapper[4125]: I0312 13:54:13.820287 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lhkzs" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server" containerID="cri-o://0f28aa6121420d893fabbbe6739a89ecbba9850633f989908946a7740ac42bb9" gracePeriod=2
Mar 12 13:54:14 crc kubenswrapper[4125]: I0312 13:54:14.837114 4125 generic.go:334] "Generic (PLEG): container finished" podID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerID="0f28aa6121420d893fabbbe6739a89ecbba9850633f989908946a7740ac42bb9" exitCode=0
Mar 12 13:54:14 crc kubenswrapper[4125]: I0312 13:54:14.837312 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerDied","Data":"0f28aa6121420d893fabbbe6739a89ecbba9850633f989908946a7740ac42bb9"}
Mar 12 13:54:16 crc kubenswrapper[4125]: I0312 13:54:16.812740 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bm6vf"]
Mar 12 13:54:16 crc kubenswrapper[4125]: I0312 13:54:16.813472 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bm6vf" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="registry-server" containerID="cri-o://648cee218f36306ad14cea2950fb1ab88cef4f9ecaedb94b6b3f734215ed161e" gracePeriod=2
Mar 12 13:54:17 crc kubenswrapper[4125]: I0312 13:54:17.889989 4125 generic.go:334] "Generic (PLEG): container finished" podID="516fe749-2e84-4f55-893f-0eae08d19f80" containerID="648cee218f36306ad14cea2950fb1ab88cef4f9ecaedb94b6b3f734215ed161e" exitCode=0
Mar 12 13:54:17 crc kubenswrapper[4125]: I0312 13:54:17.890125 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerDied","Data":"648cee218f36306ad14cea2950fb1ab88cef4f9ecaedb94b6b3f734215ed161e"}
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.022941 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lhkzs"
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.028994 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bm6vf"
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.140093 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-utilities\") pod \"516fe749-2e84-4f55-893f-0eae08d19f80\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") "
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.140172 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv8s5\" (UniqueName: \"kubernetes.io/projected/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-kube-api-access-jv8s5\") pod \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") "
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.140251 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-utilities\") pod \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") "
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.140293 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tbj9\" (UniqueName: \"kubernetes.io/projected/516fe749-2e84-4f55-893f-0eae08d19f80-kube-api-access-2tbj9\") pod \"516fe749-2e84-4f55-893f-0eae08d19f80\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") "
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.140338 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-catalog-content\") pod \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\" (UID: \"557b5f73-6b8e-42cc-87a8-b2b6ded176ef\") "
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.140379 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-catalog-content\") pod \"516fe749-2e84-4f55-893f-0eae08d19f80\" (UID: \"516fe749-2e84-4f55-893f-0eae08d19f80\") "
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.141972 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-utilities" (OuterVolumeSpecName: "utilities") pod "557b5f73-6b8e-42cc-87a8-b2b6ded176ef" (UID: "557b5f73-6b8e-42cc-87a8-b2b6ded176ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.145384 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-utilities" (OuterVolumeSpecName: "utilities") pod "516fe749-2e84-4f55-893f-0eae08d19f80" (UID: "516fe749-2e84-4f55-893f-0eae08d19f80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.177276 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-kube-api-access-jv8s5" (OuterVolumeSpecName: "kube-api-access-jv8s5") pod "557b5f73-6b8e-42cc-87a8-b2b6ded176ef" (UID: "557b5f73-6b8e-42cc-87a8-b2b6ded176ef"). InnerVolumeSpecName "kube-api-access-jv8s5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.177656 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/516fe749-2e84-4f55-893f-0eae08d19f80-kube-api-access-2tbj9" (OuterVolumeSpecName: "kube-api-access-2tbj9") pod "516fe749-2e84-4f55-893f-0eae08d19f80" (UID: "516fe749-2e84-4f55-893f-0eae08d19f80"). InnerVolumeSpecName "kube-api-access-2tbj9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.243585 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.243642 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2tbj9\" (UniqueName: \"kubernetes.io/projected/516fe749-2e84-4f55-893f-0eae08d19f80-kube-api-access-2tbj9\") on node \"crc\" DevicePath \"\""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.243658 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.243669 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-jv8s5\" (UniqueName: \"kubernetes.io/projected/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-kube-api-access-jv8s5\") on node \"crc\" DevicePath \"\""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.494251 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "516fe749-2e84-4f55-893f-0eae08d19f80" (UID: "516fe749-2e84-4f55-893f-0eae08d19f80"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.549054 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/516fe749-2e84-4f55-893f-0eae08d19f80-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.745372 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "557b5f73-6b8e-42cc-87a8-b2b6ded176ef" (UID: "557b5f73-6b8e-42cc-87a8-b2b6ded176ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.751093 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/557b5f73-6b8e-42cc-87a8-b2b6ded176ef-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.920266 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bm6vf" event={"ID":"516fe749-2e84-4f55-893f-0eae08d19f80","Type":"ContainerDied","Data":"245a7cd63c58148fb247d84d7f3c130f64953916ee19293100ca5a0d96fc8ed5"}
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.920411 4125 scope.go:117] "RemoveContainer" containerID="648cee218f36306ad14cea2950fb1ab88cef4f9ecaedb94b6b3f734215ed161e"
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.920572 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bm6vf"
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.979776 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhkzs" event={"ID":"557b5f73-6b8e-42cc-87a8-b2b6ded176ef","Type":"ContainerDied","Data":"10a9ed468e9b762f8e2b9e8bd6646d285447a0b90a7768485c4cf82bfbcd4302"}
Mar 12 13:54:20 crc kubenswrapper[4125]: I0312 13:54:20.980021 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lhkzs"
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.101528 4125 scope.go:117] "RemoveContainer" containerID="e91cfc6aaee95b21d35490b2818f0759c54b6f5d1cefdaf516b657b3d0ec19aa"
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.197100 4125 scope.go:117] "RemoveContainer" containerID="fc98f95a37ddcf11343bdcd8300587daa73bb8c75d508de2408b43ce890b711e"
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.257944 4125 scope.go:117] "RemoveContainer" containerID="0f28aa6121420d893fabbbe6739a89ecbba9850633f989908946a7740ac42bb9"
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.321315 4125 scope.go:117] "RemoveContainer" containerID="8902a80dc6c1b44afe081b6bf70f980fc709638e25089b155d5a17acf2becf78"
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.423276 4125 scope.go:117] "RemoveContainer" containerID="1cfa14150a7769ea4031b0066ce21c43a2c9e8f49ecf96890443a01fb0d5a5a1"
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.612409 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lhkzs"]
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.695748 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lhkzs"]
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.773737 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bm6vf"]
Mar 12 13:54:21 crc kubenswrapper[4125]: I0312 13:54:21.842735 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bm6vf"]
Mar 12 13:54:22 crc kubenswrapper[4125]: I0312 13:54:22.039888 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" path="/var/lib/kubelet/pods/516fe749-2e84-4f55-893f-0eae08d19f80/volumes"
Mar 12 13:54:22 crc kubenswrapper[4125]: I0312 13:54:22.040761 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" path="/var/lib/kubelet/pods/557b5f73-6b8e-42cc-87a8-b2b6ded176ef/volumes"
Mar 12 13:54:36 crc kubenswrapper[4125]: I0312 13:54:36.178466 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:54:36 crc kubenswrapper[4125]: I0312 13:54:36.179313 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:54:36 crc kubenswrapper[4125]: I0312 13:54:36.179391 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:54:36 crc kubenswrapper[4125]: I0312 13:54:36.179429 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:54:36 crc kubenswrapper[4125]: I0312 13:54:36.179477 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:54:45 crc kubenswrapper[4125]: I0312 13:54:45.266139 4125 generic.go:334] "Generic (PLEG): container finished" podID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerID="d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46" exitCode=0
Mar 12 13:54:45 crc kubenswrapper[4125]: I0312 13:54:45.266264 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerDied","Data":"d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46"}
Mar 12 13:54:47 crc kubenswrapper[4125]: I0312 13:54:47.283156 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerStarted","Data":"2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8"}
Mar 12 13:54:47 crc kubenswrapper[4125]: I0312 13:54:47.343096 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-x9zfr"
Mar 12 13:54:47 crc kubenswrapper[4125]: I0312 13:54:47.343246 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x9zfr"
Mar 12 13:54:47 crc kubenswrapper[4125]: I0312 13:54:47.361254 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-x9zfr" podStartSLOduration=4.373795898 podStartE2EDuration="1m51.361179613s" podCreationTimestamp="2026-03-12 13:52:56 +0000 UTC" firstStartedPulling="2026-03-12 13:52:58.886349114 +0000 UTC m=+1949.209734873" lastFinishedPulling="2026-03-12 13:54:45.873732539 +0000 UTC m=+2056.197118588" observedRunningTime="2026-03-12 13:54:47.357662945 +0000 UTC m=+2057.681048884" watchObservedRunningTime="2026-03-12 13:54:47.361179613 +0000 UTC m=+2057.684565512"
Mar 12 13:54:48 crc kubenswrapper[4125]: I0312 13:54:48.451476 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x9zfr" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:54:48 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:54:48 crc kubenswrapper[4125]: >
Mar 12 13:54:58 crc kubenswrapper[4125]: I0312 13:54:58.459478 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x9zfr" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:54:58 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:54:58 crc kubenswrapper[4125]: >
Mar 12 13:55:08 crc kubenswrapper[4125]: I0312 13:55:08.494583 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x9zfr" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:55:08 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:55:08 crc kubenswrapper[4125]: >
Mar 12 13:55:18 crc kubenswrapper[4125]: I0312 13:55:18.462448 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x9zfr" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server" probeResult="failure" output=<
Mar 12 13:55:18 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 13:55:18 crc kubenswrapper[4125]: >
Mar 12 13:55:20 crc kubenswrapper[4125]: I0312 13:55:20.803256 4125 patch_prober.go:28] interesting pod/apiserver-69c565c9b6-vbdpd container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]log ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]etcd ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [-]etcd-readiness failed: reason withheld
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]informer-sync ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]poststarthook/generic-apiserver-start-informers ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]poststarthook/max-in-flight-filter ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]poststarthook/storage-object-count-tracker-hook ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-StartUserInformer ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-StartOAuthInformer ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok
Mar 12 13:55:20 crc kubenswrapper[4125]: [+]shutdown ok
Mar 12 13:55:20 crc kubenswrapper[4125]: readyz check failed
Mar 12 13:55:20 crc kubenswrapper[4125]: I0312 13:55:20.803444 4125 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 12 13:55:20 crc kubenswrapper[4125]: I0312 13:55:20.826755 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:55:20 crc kubenswrapper[4125]: I0312 13:55:20.871065 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 12 13:55:27 crc kubenswrapper[4125]: I0312 13:55:27.528284 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x9zfr"
Mar 12 13:55:27 crc kubenswrapper[4125]: I0312 13:55:27.689412 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x9zfr"
Mar 12 13:55:27 crc kubenswrapper[4125]: I0312 13:55:27.825773 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x9zfr"]
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.106261 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x9zfr" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server" containerID="cri-o://2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8" gracePeriod=2
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.645749 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x9zfr"
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.805340 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-utilities\") pod \"f2644f3f-05ed-4871-9be0-9258d5745cf2\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") "
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.805489 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvlxl\" (UniqueName: \"kubernetes.io/projected/f2644f3f-05ed-4871-9be0-9258d5745cf2-kube-api-access-bvlxl\") pod \"f2644f3f-05ed-4871-9be0-9258d5745cf2\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") "
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.805556 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-catalog-content\") pod \"f2644f3f-05ed-4871-9be0-9258d5745cf2\" (UID: \"f2644f3f-05ed-4871-9be0-9258d5745cf2\") "
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.806553 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-utilities" (OuterVolumeSpecName: "utilities") pod "f2644f3f-05ed-4871-9be0-9258d5745cf2" (UID: "f2644f3f-05ed-4871-9be0-9258d5745cf2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.814653 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2644f3f-05ed-4871-9be0-9258d5745cf2-kube-api-access-bvlxl" (OuterVolumeSpecName: "kube-api-access-bvlxl") pod "f2644f3f-05ed-4871-9be0-9258d5745cf2" (UID: "f2644f3f-05ed-4871-9be0-9258d5745cf2"). InnerVolumeSpecName "kube-api-access-bvlxl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.907514 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-bvlxl\" (UniqueName: \"kubernetes.io/projected/f2644f3f-05ed-4871-9be0-9258d5745cf2-kube-api-access-bvlxl\") on node \"crc\" DevicePath \"\""
Mar 12 13:55:29 crc kubenswrapper[4125]: I0312 13:55:29.907585 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.105469 4125 generic.go:334] "Generic (PLEG): container finished" podID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerID="2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8" exitCode=0
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.105544 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x9zfr"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.105563 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerDied","Data":"2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8"}
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.106902 4125 scope.go:117] "RemoveContainer" containerID="2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.107087 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x9zfr" event={"ID":"f2644f3f-05ed-4871-9be0-9258d5745cf2","Type":"ContainerDied","Data":"f86863f8c9470b72e10e441f60dc49d487d93e0c008dd712c07014aae3c6a61a"}
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.163155 4125 scope.go:117] "RemoveContainer" containerID="d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.287045 4125 scope.go:117] "RemoveContainer" containerID="ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.333025 4125 scope.go:117] "RemoveContainer" containerID="2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8"
Mar 12 13:55:30 crc kubenswrapper[4125]: E0312 13:55:30.337533 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8\": container with ID starting with 2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8 not found: ID does not exist" containerID="2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.337623 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8"} err="failed to get container status \"2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8\": rpc error: code = NotFound desc = could not find container \"2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8\": container with ID starting with 2382cfae294028afe2d7255aedbc3710fe3512cf6554af430a1d97be6f07cec8 not found: ID does not exist"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.337651 4125 scope.go:117] "RemoveContainer" containerID="d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46"
Mar 12 13:55:30 crc kubenswrapper[4125]: E0312 13:55:30.338909 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46\": container with ID starting with d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46 not found: ID does not exist" containerID="d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.338951 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46"} err="failed to get container status \"d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46\": rpc error: code = NotFound desc = could not find container \"d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46\": container with ID starting with d26baa54e83c4a2d6a75f9121ebd102d1727ab80adfa9635ca887cfd62d94b46 not found: ID does not exist"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.338966 4125 scope.go:117] "RemoveContainer" containerID="ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7"
Mar 12 13:55:30 crc kubenswrapper[4125]: E0312 13:55:30.339386 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7\": container with ID starting with ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7 not found: ID does not exist" containerID="ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7"
Mar 12 13:55:30 crc kubenswrapper[4125]: I0312 13:55:30.339457 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7"} err="failed to get container status \"ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7\": rpc error: code = NotFound desc = could not find container \"ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7\": container with ID starting with ae0fc416c2d1bf15e9159770156ee6f0410539a28fa677ac60e7932b1f52e3b7 not found: ID does not exist"
Mar 12 13:55:31 crc kubenswrapper[4125]: I0312 13:55:31.118730 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f2644f3f-05ed-4871-9be0-9258d5745cf2" (UID: "f2644f3f-05ed-4871-9be0-9258d5745cf2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 13:55:31 crc kubenswrapper[4125]: I0312 13:55:31.127962 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2644f3f-05ed-4871-9be0-9258d5745cf2-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 13:55:31 crc kubenswrapper[4125]: I0312 13:55:31.487931 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x9zfr"]
Mar 12 13:55:31 crc kubenswrapper[4125]: I0312 13:55:31.509520 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x9zfr"]
Mar 12 13:55:32 crc kubenswrapper[4125]: I0312 13:55:32.036429 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" path="/var/lib/kubelet/pods/f2644f3f-05ed-4871-9be0-9258d5745cf2/volumes"
Mar 12 13:55:36 crc kubenswrapper[4125]: I0312 13:55:36.180089 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:55:36 crc kubenswrapper[4125]: I0312 13:55:36.180423 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:55:36 crc kubenswrapper[4125]: I0312 13:55:36.180488 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:55:36 crc kubenswrapper[4125]: I0312 13:55:36.180523 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:55:36 crc kubenswrapper[4125]: I0312 13:55:36.180558 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:56:36 crc kubenswrapper[4125]: I0312 13:56:36.515727 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:56:36 crc kubenswrapper[4125]: I0312 13:56:36.517617 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:56:36 crc kubenswrapper[4125]: I0312 13:56:36.517742 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:56:36 crc kubenswrapper[4125]: I0312 13:56:36.517913 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:56:36 crc kubenswrapper[4125]: I0312 13:56:36.518067 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:57:36 crc kubenswrapper[4125]: I0312 13:57:36.532766 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:57:36 crc kubenswrapper[4125]: I0312 13:57:36.533720 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:57:36 crc kubenswrapper[4125]: I0312 13:57:36.533782 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:57:36 crc kubenswrapper[4125]: I0312 13:57:36.534045 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:57:36 crc kubenswrapper[4125]: I0312 13:57:36.534260 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:58:36 crc kubenswrapper[4125]: I0312 13:58:36.534914 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:58:36 crc kubenswrapper[4125]: I0312 13:58:36.535454 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:58:36 crc kubenswrapper[4125]: I0312 13:58:36.535504 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:58:36 crc kubenswrapper[4125]: I0312 13:58:36.535546 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:58:36 crc kubenswrapper[4125]: I0312 13:58:36.535565 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 13:59:36 crc kubenswrapper[4125]: I0312 13:59:36.536650 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 13:59:36 crc kubenswrapper[4125]: I0312 13:59:36.541032 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 13:59:36 crc kubenswrapper[4125]: I0312 13:59:36.541444 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 13:59:36 crc kubenswrapper[4125]: I0312 13:59:36.541790 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 13:59:36 crc kubenswrapper[4125]: I0312 13:59:36.542208 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.404158 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"]
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.411744 4125 topology_manager.go:215] "Topology Admit Handler" podUID="079b7b69-b036-48d0-ab94-f3d1e03777f9" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.412733 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="extract-content"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.413707 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="extract-content"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.413743 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="extract-utilities"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.413755 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="extract-utilities"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.413773 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.413784 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.413951 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.413974 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.413991 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="extract-content"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414001 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="extract-content"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.414025 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414036 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.414054 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="extract-utilities"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414064 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="extract-utilities"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.414086 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="extract-utilities"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414096 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="extract-utilities"
Mar 12 14:00:00 crc kubenswrapper[4125]: E0312 14:00:00.414117 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="extract-content"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414131 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="extract-content"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414420 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="516fe749-2e84-4f55-893f-0eae08d19f80" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414449 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2644f3f-05ed-4871-9be0-9258d5745cf2" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.414470 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="557b5f73-6b8e-42cc-87a8-b2b6ded176ef" containerName="registry-server"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.416345 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.429598 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.429664 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.447565 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"]
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.451951 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/079b7b69-b036-48d0-ab94-f3d1e03777f9-config-volume\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.452355 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s6qk\" (UniqueName: \"kubernetes.io/projected/079b7b69-b036-48d0-ab94-f3d1e03777f9-kube-api-access-9s6qk\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.452402 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/079b7b69-b036-48d0-ab94-f3d1e03777f9-secret-volume\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.554141 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/079b7b69-b036-48d0-ab94-f3d1e03777f9-config-volume\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.554212 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9s6qk\" (UniqueName: \"kubernetes.io/projected/079b7b69-b036-48d0-ab94-f3d1e03777f9-kube-api-access-9s6qk\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.554269 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/079b7b69-b036-48d0-ab94-f3d1e03777f9-secret-volume\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.555988 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/079b7b69-b036-48d0-ab94-f3d1e03777f9-config-volume\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.570740 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/079b7b69-b036-48d0-ab94-f3d1e03777f9-secret-volume\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.574984 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s6qk\" (UniqueName: \"kubernetes.io/projected/079b7b69-b036-48d0-ab94-f3d1e03777f9-kube-api-access-9s6qk\") pod \"collect-profiles-29555400-kb5zp\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:00 crc kubenswrapper[4125]: I0312 14:00:00.817588 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:01 crc kubenswrapper[4125]: I0312 14:00:01.348635 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"]
Mar 12 14:00:02 crc kubenswrapper[4125]: I0312 14:00:02.183691 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp" event={"ID":"079b7b69-b036-48d0-ab94-f3d1e03777f9","Type":"ContainerStarted","Data":"8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde"}
Mar 12 14:00:02 crc kubenswrapper[4125]: I0312 14:00:02.184144 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp" event={"ID":"079b7b69-b036-48d0-ab94-f3d1e03777f9","Type":"ContainerStarted","Data":"a713a9660bc38d16f850a9c80885d465dd76b38a4d27df2369b46cf1051048cd"}
Mar 12 14:00:03 crc kubenswrapper[4125]: I0312 14:00:03.208423 4125 generic.go:334] "Generic (PLEG): container finished" podID="079b7b69-b036-48d0-ab94-f3d1e03777f9" containerID="8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde" exitCode=0
Mar 12 14:00:03 crc kubenswrapper[4125]: I0312 14:00:03.208517 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp" event={"ID":"079b7b69-b036-48d0-ab94-f3d1e03777f9","Type":"ContainerDied","Data":"8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde"}
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.597374 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.729048 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/079b7b69-b036-48d0-ab94-f3d1e03777f9-secret-volume\") pod \"079b7b69-b036-48d0-ab94-f3d1e03777f9\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") "
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.729150 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/079b7b69-b036-48d0-ab94-f3d1e03777f9-config-volume\") pod \"079b7b69-b036-48d0-ab94-f3d1e03777f9\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") "
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.729202 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9s6qk\" (UniqueName: \"kubernetes.io/projected/079b7b69-b036-48d0-ab94-f3d1e03777f9-kube-api-access-9s6qk\") pod \"079b7b69-b036-48d0-ab94-f3d1e03777f9\" (UID: \"079b7b69-b036-48d0-ab94-f3d1e03777f9\") "
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.730132 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/079b7b69-b036-48d0-ab94-f3d1e03777f9-config-volume" (OuterVolumeSpecName: "config-volume") pod "079b7b69-b036-48d0-ab94-f3d1e03777f9" (UID: "079b7b69-b036-48d0-ab94-f3d1e03777f9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.730741 4125 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/079b7b69-b036-48d0-ab94-f3d1e03777f9-config-volume\") on node \"crc\" DevicePath \"\""
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.738132 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/079b7b69-b036-48d0-ab94-f3d1e03777f9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "079b7b69-b036-48d0-ab94-f3d1e03777f9" (UID: "079b7b69-b036-48d0-ab94-f3d1e03777f9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.740365 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/079b7b69-b036-48d0-ab94-f3d1e03777f9-kube-api-access-9s6qk" (OuterVolumeSpecName: "kube-api-access-9s6qk") pod "079b7b69-b036-48d0-ab94-f3d1e03777f9" (UID: "079b7b69-b036-48d0-ab94-f3d1e03777f9"). InnerVolumeSpecName "kube-api-access-9s6qk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.831591 4125 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/079b7b69-b036-48d0-ab94-f3d1e03777f9-secret-volume\") on node \"crc\" DevicePath \"\""
Mar 12 14:00:04 crc kubenswrapper[4125]: I0312 14:00:04.831657 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-9s6qk\" (UniqueName: \"kubernetes.io/projected/079b7b69-b036-48d0-ab94-f3d1e03777f9-kube-api-access-9s6qk\") on node \"crc\" DevicePath \"\""
Mar 12 14:00:05 crc kubenswrapper[4125]: I0312 14:00:05.232204 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 12 14:00:05 crc kubenswrapper[4125]: I0312 14:00:05.232255 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp" event={"ID":"079b7b69-b036-48d0-ab94-f3d1e03777f9","Type":"ContainerDied","Data":"a713a9660bc38d16f850a9c80885d465dd76b38a4d27df2369b46cf1051048cd"}
Mar 12 14:00:05 crc kubenswrapper[4125]: I0312 14:00:05.236189 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a713a9660bc38d16f850a9c80885d465dd76b38a4d27df2369b46cf1051048cd"
Mar 12 14:00:36 crc kubenswrapper[4125]: I0312 14:00:36.543070 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:00:36 crc kubenswrapper[4125]: I0312 14:00:36.544105 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:00:36 crc kubenswrapper[4125]: I0312 14:00:36.544163 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:00:36 crc kubenswrapper[4125]: I0312 14:00:36.544313 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:00:36 crc kubenswrapper[4125]: I0312 14:00:36.544391 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:01:36 crc kubenswrapper[4125]: I0312 14:01:36.545645 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:01:36 crc kubenswrapper[4125]: I0312 14:01:36.546524 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:01:36 crc kubenswrapper[4125]: I0312 14:01:36.546639 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:01:36 crc kubenswrapper[4125]: I0312 14:01:36.546685 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:01:36 crc kubenswrapper[4125]: I0312 14:01:36.546743 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:02:36 crc kubenswrapper[4125]: I0312 14:02:36.902297 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:02:36 crc kubenswrapper[4125]: I0312 14:02:36.902910 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:02:36 crc kubenswrapper[4125]: I0312 14:02:36.902973 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:02:36 crc kubenswrapper[4125]: I0312 14:02:36.903009 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:02:36 crc kubenswrapper[4125]: I0312 14:02:36.903133 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.818335 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kfmr8"]
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.818890 4125 topology_manager.go:215] "Topology Admit Handler" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" podNamespace="openshift-marketplace" podName="redhat-marketplace-kfmr8"
Mar 12 14:02:37 crc kubenswrapper[4125]: E0312 14:02:37.819298 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="079b7b69-b036-48d0-ab94-f3d1e03777f9" containerName="collect-profiles"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.819341 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="079b7b69-b036-48d0-ab94-f3d1e03777f9" containerName="collect-profiles"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.819578 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="079b7b69-b036-48d0-ab94-f3d1e03777f9" containerName="collect-profiles"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.820964 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.892412 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfmr8"]
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.915727 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-catalog-content\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.916005 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-utilities\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:37 crc kubenswrapper[4125]: I0312 14:02:37.916078 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlhlg\" (UniqueName: \"kubernetes.io/projected/0807ff23-2898-4c16-b19e-e9eec794a426-kube-api-access-dlhlg\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.018599 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-catalog-content\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.018717 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-catalog-content\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.018790 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-utilities\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.019458 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-utilities\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.019729 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dlhlg\" (UniqueName: \"kubernetes.io/projected/0807ff23-2898-4c16-b19e-e9eec794a426-kube-api-access-dlhlg\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.063387 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlhlg\" (UniqueName: \"kubernetes.io/projected/0807ff23-2898-4c16-b19e-e9eec794a426-kube-api-access-dlhlg\") pod \"redhat-marketplace-kfmr8\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") " pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.149522 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.525617 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfmr8"]
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.941054 4125 generic.go:334] "Generic (PLEG): container finished" podID="0807ff23-2898-4c16-b19e-e9eec794a426" containerID="7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc" exitCode=0
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.941104 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerDied","Data":"7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc"}
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.941129 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerStarted","Data":"9a875b0c6e7b4aaa1230ca85fb41d78993fdacb42e30155f4af4154564acf076"}
Mar 12 14:02:38 crc kubenswrapper[4125]: I0312 14:02:38.950364 4125 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Mar 12 14:02:39 crc kubenswrapper[4125]: I0312 14:02:39.952272 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerStarted","Data":"2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2"}
Mar 12 14:02:45 crc kubenswrapper[4125]: I0312 14:02:45.005942 4125 generic.go:334] "Generic (PLEG): container finished" podID="0807ff23-2898-4c16-b19e-e9eec794a426" containerID="2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2" exitCode=0
Mar 12 14:02:45 crc kubenswrapper[4125]: I0312 14:02:45.006216 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerDied","Data":"2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2"}
Mar 12 14:02:47 crc kubenswrapper[4125]: I0312 14:02:47.021996 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerStarted","Data":"f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc"}
Mar 12 14:02:47 crc kubenswrapper[4125]: I0312 14:02:47.084947 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kfmr8" podStartSLOduration=2.678672536 podStartE2EDuration="10.084855941s" podCreationTimestamp="2026-03-12 14:02:37 +0000 UTC" firstStartedPulling="2026-03-12 14:02:38.943459377 +0000 UTC m=+2529.266845236" lastFinishedPulling="2026-03-12 14:02:46.349642882 +0000 UTC m=+2536.673028641" observedRunningTime="2026-03-12 14:02:47.077199564 +0000 UTC m=+2537.400585473" watchObservedRunningTime="2026-03-12 14:02:47.084855941 +0000 UTC m=+2537.408241880"
Mar 12 14:02:48 crc kubenswrapper[4125]: I0312 14:02:48.150455 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:48 crc kubenswrapper[4125]: I0312 14:02:48.151138 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:48 crc kubenswrapper[4125]: I0312 14:02:48.263008 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:58 crc kubenswrapper[4125]: I0312 14:02:58.298626 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:58 crc kubenswrapper[4125]: I0312 14:02:58.377631 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfmr8"]
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.150546 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kfmr8" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="registry-server" containerID="cri-o://f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc" gracePeriod=2
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.622909 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.774673 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-utilities\") pod \"0807ff23-2898-4c16-b19e-e9eec794a426\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") "
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.774908 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlhlg\" (UniqueName: \"kubernetes.io/projected/0807ff23-2898-4c16-b19e-e9eec794a426-kube-api-access-dlhlg\") pod \"0807ff23-2898-4c16-b19e-e9eec794a426\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") "
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.775000 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-catalog-content\") pod \"0807ff23-2898-4c16-b19e-e9eec794a426\" (UID: \"0807ff23-2898-4c16-b19e-e9eec794a426\") "
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.775626 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-utilities" (OuterVolumeSpecName: "utilities") pod "0807ff23-2898-4c16-b19e-e9eec794a426" (UID: "0807ff23-2898-4c16-b19e-e9eec794a426"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.798378 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0807ff23-2898-4c16-b19e-e9eec794a426-kube-api-access-dlhlg" (OuterVolumeSpecName: "kube-api-access-dlhlg") pod "0807ff23-2898-4c16-b19e-e9eec794a426" (UID: "0807ff23-2898-4c16-b19e-e9eec794a426"). InnerVolumeSpecName "kube-api-access-dlhlg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.876654 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 14:02:59 crc kubenswrapper[4125]: I0312 14:02:59.876790 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-dlhlg\" (UniqueName: \"kubernetes.io/projected/0807ff23-2898-4c16-b19e-e9eec794a426-kube-api-access-dlhlg\") on node \"crc\" DevicePath \"\""
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.074088 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0807ff23-2898-4c16-b19e-e9eec794a426" (UID: "0807ff23-2898-4c16-b19e-e9eec794a426"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.080162 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0807ff23-2898-4c16-b19e-e9eec794a426-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.172534 4125 generic.go:334] "Generic (PLEG): container finished" podID="0807ff23-2898-4c16-b19e-e9eec794a426" containerID="f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc" exitCode=0
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.172699 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerDied","Data":"f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc"}
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.172760 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfmr8" event={"ID":"0807ff23-2898-4c16-b19e-e9eec794a426","Type":"ContainerDied","Data":"9a875b0c6e7b4aaa1230ca85fb41d78993fdacb42e30155f4af4154564acf076"}
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.172801 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfmr8"
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.172960 4125 scope.go:117] "RemoveContainer" containerID="f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc"
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.267276 4125 scope.go:117] "RemoveContainer" containerID="2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2"
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.270956 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfmr8"]
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.276269 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfmr8"]
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.318474 4125 scope.go:117] "RemoveContainer" containerID="7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc"
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.353143 4125 scope.go:117] "RemoveContainer" containerID="f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc"
Mar 12 14:03:00 crc kubenswrapper[4125]: E0312 14:03:00.353944 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc\": container with ID starting with f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc not found: ID does not exist" containerID="f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc"
Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.354012 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc"} err="failed to get container status \"f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc\": rpc error: code = NotFound desc = could not find container \"f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc\": container with ID starting with f7210e638a47397cd8e876f3fc597afb591ab297277440f96c58ebf4bbb99cdc not found: ID does not exist"
Mar
12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.354027 4125 scope.go:117] "RemoveContainer" containerID="2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2" Mar 12 14:03:00 crc kubenswrapper[4125]: E0312 14:03:00.354741 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2\": container with ID starting with 2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2 not found: ID does not exist" containerID="2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2" Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.354958 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2"} err="failed to get container status \"2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2\": rpc error: code = NotFound desc = could not find container \"2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2\": container with ID starting with 2e678d45fa257b7f0d297a2363e01e885543144bc6f72e69486be662411242b2 not found: ID does not exist" Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.354990 4125 scope.go:117] "RemoveContainer" containerID="7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc" Mar 12 14:03:00 crc kubenswrapper[4125]: E0312 14:03:00.355726 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc\": container with ID starting with 7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc not found: ID does not exist" containerID="7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc" Mar 12 14:03:00 crc kubenswrapper[4125]: I0312 14:03:00.355802 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc"} err="failed to get container status \"7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc\": rpc error: code = NotFound desc = could not find container \"7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc\": container with ID starting with 7250af40c0f5b757d274b555f0c562be6308df1fca9ec5f34e9aec0ed03f4bbc not found: ID does not exist" Mar 12 14:03:02 crc kubenswrapper[4125]: I0312 14:03:02.039721 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" path="/var/lib/kubelet/pods/0807ff23-2898-4c16-b19e-e9eec794a426/volumes" Mar 12 14:03:36 crc kubenswrapper[4125]: I0312 14:03:36.903992 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:03:36 crc kubenswrapper[4125]: I0312 14:03:36.904675 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:03:36 crc kubenswrapper[4125]: I0312 14:03:36.904757 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:03:36 crc kubenswrapper[4125]: I0312 14:03:36.904911 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:03:36 crc kubenswrapper[4125]: I0312 
14:03:36.904940 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.443068 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wtznz"] Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.443932 4125 topology_manager.go:215] "Topology Admit Handler" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" podNamespace="openshift-marketplace" podName="community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: E0312 14:04:00.444284 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="extract-utilities" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.444314 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="extract-utilities" Mar 12 14:04:00 crc kubenswrapper[4125]: E0312 14:04:00.444331 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="extract-content" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.444341 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="extract-content" Mar 12 14:04:00 crc kubenswrapper[4125]: E0312 14:04:00.444371 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="registry-server" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.444381 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="registry-server" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.444787 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="0807ff23-2898-4c16-b19e-e9eec794a426" containerName="registry-server" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.446431 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.485205 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wtznz"] Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.561668 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-utilities\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.562004 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-catalog-content\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.562104 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccvnc\" (UniqueName: \"kubernetes.io/projected/1f70065c-ee7b-41cd-9492-016f43a8905f-kube-api-access-ccvnc\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.663517 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-utilities\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.663608 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-catalog-content\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.663653 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ccvnc\" (UniqueName: \"kubernetes.io/projected/1f70065c-ee7b-41cd-9492-016f43a8905f-kube-api-access-ccvnc\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.664732 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-utilities\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.665115 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-catalog-content\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.692054 4125 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ccvnc\" (UniqueName: \"kubernetes.io/projected/1f70065c-ee7b-41cd-9492-016f43a8905f-kube-api-access-ccvnc\") pod \"community-operators-wtznz\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:00 crc kubenswrapper[4125]: I0312 14:04:00.782366 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:01 crc kubenswrapper[4125]: I0312 14:04:01.167571 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wtznz"] Mar 12 14:04:01 crc kubenswrapper[4125]: I0312 14:04:01.910114 4125 generic.go:334] "Generic (PLEG): container finished" podID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerID="f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9" exitCode=0 Mar 12 14:04:01 crc kubenswrapper[4125]: I0312 14:04:01.911495 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerDied","Data":"f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9"} Mar 12 14:04:01 crc kubenswrapper[4125]: I0312 14:04:01.911674 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerStarted","Data":"90b0539fc7a7395f7f30ef6df4b22ee7643c631a9280c58520f5dbd5f58f971f"} Mar 12 14:04:02 crc kubenswrapper[4125]: I0312 14:04:02.922316 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerStarted","Data":"e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298"} Mar 12 14:04:16 crc kubenswrapper[4125]: I0312 14:04:16.031031 4125 generic.go:334] "Generic (PLEG): container finished" podID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerID="e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298" exitCode=0 Mar 12 14:04:16 crc kubenswrapper[4125]: I0312 14:04:16.037201 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerDied","Data":"e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298"} Mar 12 14:04:18 crc kubenswrapper[4125]: I0312 14:04:18.056475 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerStarted","Data":"9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244"} Mar 12 14:04:18 crc kubenswrapper[4125]: I0312 14:04:18.111964 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wtznz" podStartSLOduration=3.606780517 podStartE2EDuration="18.111682238s" podCreationTimestamp="2026-03-12 14:04:00 +0000 UTC" firstStartedPulling="2026-03-12 14:04:01.913638008 +0000 UTC m=+2612.237023767" lastFinishedPulling="2026-03-12 14:04:16.418539609 +0000 UTC m=+2626.741925488" observedRunningTime="2026-03-12 14:04:18.104787705 +0000 UTC m=+2628.428174404" watchObservedRunningTime="2026-03-12 14:04:18.111682238 +0000 UTC m=+2628.435068597" Mar 12 14:04:20 crc kubenswrapper[4125]: I0312 14:04:20.784083 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:20 crc kubenswrapper[4125]: I0312 14:04:20.784678 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:22 crc kubenswrapper[4125]: I0312 14:04:22.042317 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wtznz" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="registry-server" probeResult="failure" output=< Mar 12 14:04:22 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 14:04:22 crc kubenswrapper[4125]: > Mar 12 14:04:31 crc kubenswrapper[4125]: I0312 14:04:31.003792 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:31 crc kubenswrapper[4125]: I0312 14:04:31.150338 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:31 crc kubenswrapper[4125]: I0312 14:04:31.243330 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wtznz"] Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.173659 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wtznz" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="registry-server" containerID="cri-o://9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244" gracePeriod=2 Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.745409 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.810666 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccvnc\" (UniqueName: \"kubernetes.io/projected/1f70065c-ee7b-41cd-9492-016f43a8905f-kube-api-access-ccvnc\") pod \"1f70065c-ee7b-41cd-9492-016f43a8905f\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.811181 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-catalog-content\") pod \"1f70065c-ee7b-41cd-9492-016f43a8905f\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.817156 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-utilities" (OuterVolumeSpecName: "utilities") pod "1f70065c-ee7b-41cd-9492-016f43a8905f" (UID: "1f70065c-ee7b-41cd-9492-016f43a8905f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.817307 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-utilities\") pod \"1f70065c-ee7b-41cd-9492-016f43a8905f\" (UID: \"1f70065c-ee7b-41cd-9492-016f43a8905f\") " Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.817682 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.833080 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f70065c-ee7b-41cd-9492-016f43a8905f-kube-api-access-ccvnc" (OuterVolumeSpecName: "kube-api-access-ccvnc") pod "1f70065c-ee7b-41cd-9492-016f43a8905f" (UID: "1f70065c-ee7b-41cd-9492-016f43a8905f"). InnerVolumeSpecName "kube-api-access-ccvnc". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 14:04:32 crc kubenswrapper[4125]: I0312 14:04:32.918976 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-ccvnc\" (UniqueName: \"kubernetes.io/projected/1f70065c-ee7b-41cd-9492-016f43a8905f-kube-api-access-ccvnc\") on node \"crc\" DevicePath \"\"" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.198010 4125 generic.go:334] "Generic (PLEG): container finished" podID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerID="9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244" exitCode=0 Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.198080 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wtznz" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.198090 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerDied","Data":"9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244"} Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.198151 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wtznz" event={"ID":"1f70065c-ee7b-41cd-9492-016f43a8905f","Type":"ContainerDied","Data":"90b0539fc7a7395f7f30ef6df4b22ee7643c631a9280c58520f5dbd5f58f971f"} Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.198404 4125 scope.go:117] "RemoveContainer" containerID="9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.306159 4125 scope.go:117] "RemoveContainer" containerID="e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.463020 4125 scope.go:117] "RemoveContainer" containerID="f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.524474 4125 scope.go:117] "RemoveContainer" containerID="9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244" Mar 12 14:04:33 crc kubenswrapper[4125]: E0312 14:04:33.525623 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244\": container with ID starting with 9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244 not found: ID does not exist" containerID="9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.525908 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244"} err="failed to get container status \"9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244\": rpc error: code = NotFound desc = could not find container \"9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244\": container with ID starting with 9ec47683a711b25e7e7f1059ee48f98b060ae962729bebeb80f70f7f11b92244 not found: ID does not exist" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.526038 4125 scope.go:117] "RemoveContainer" containerID="e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298" Mar 12 14:04:33 crc kubenswrapper[4125]: E0312 14:04:33.527240 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298\": container with ID starting with e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298 not found: ID does not exist" containerID="e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.527350 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298"} err="failed to get container status \"e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298\": rpc error: code = NotFound desc = could not find 
container \"e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298\": container with ID starting with e76c74f0584c3390161116482b81bfd1eaa987a5782025341f0cbcd834322298 not found: ID does not exist" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.527399 4125 scope.go:117] "RemoveContainer" containerID="f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9" Mar 12 14:04:33 crc kubenswrapper[4125]: E0312 14:04:33.527904 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9\": container with ID starting with f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9 not found: ID does not exist" containerID="f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.527976 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9"} err="failed to get container status \"f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9\": rpc error: code = NotFound desc = could not find container \"f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9\": container with ID starting with f2e41427ae08db1fb50994f9d52b1a1dd80b55d54c310349b327ace64549eeb9 not found: ID does not exist" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.609368 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f70065c-ee7b-41cd-9492-016f43a8905f" (UID: "1f70065c-ee7b-41cd-9492-016f43a8905f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.630059 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f70065c-ee7b-41cd-9492-016f43a8905f-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.869799 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wtznz"] Mar 12 14:04:33 crc kubenswrapper[4125]: I0312 14:04:33.894270 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wtznz"] Mar 12 14:04:34 crc kubenswrapper[4125]: I0312 14:04:34.033898 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" path="/var/lib/kubelet/pods/1f70065c-ee7b-41cd-9492-016f43a8905f/volumes" Mar 12 14:04:36 crc kubenswrapper[4125]: I0312 14:04:36.906469 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:04:36 crc kubenswrapper[4125]: I0312 14:04:36.909067 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:04:36 crc kubenswrapper[4125]: I0312 14:04:36.909429 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:04:36 crc kubenswrapper[4125]: I0312 14:04:36.909724 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:04:36 crc kubenswrapper[4125]: I0312 14:04:36.910120 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.334949 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lmrsm"] Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.335775 4125 topology_manager.go:215] "Topology Admit Handler" podUID="c72c30d0-9608-4870-b248-cc194aad7427" podNamespace="openshift-marketplace" podName="redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: E0312 14:05:11.336158 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="registry-server" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.336181 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="registry-server" Mar 12 14:05:11 crc kubenswrapper[4125]: E0312 14:05:11.336207 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="extract-content" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.336217 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="extract-content" Mar 12 14:05:11 crc kubenswrapper[4125]: E0312 14:05:11.336236 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="extract-utilities" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.336249 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="extract-utilities" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.336494 4125 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="1f70065c-ee7b-41cd-9492-016f43a8905f" containerName="registry-server" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.344125 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.376335 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lmrsm"] Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.428054 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnlt4\" (UniqueName: \"kubernetes.io/projected/c72c30d0-9608-4870-b248-cc194aad7427-kube-api-access-cnlt4\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.428190 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-utilities\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.428240 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-catalog-content\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.529923 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-cnlt4\" (UniqueName: \"kubernetes.io/projected/c72c30d0-9608-4870-b248-cc194aad7427-kube-api-access-cnlt4\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.530010 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-utilities\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.530041 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-catalog-content\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.530591 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-catalog-content\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.531374 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-utilities\") pod \"redhat-operators-lmrsm\" (UID: 
\"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.556736 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnlt4\" (UniqueName: \"kubernetes.io/projected/c72c30d0-9608-4870-b248-cc194aad7427-kube-api-access-cnlt4\") pod \"redhat-operators-lmrsm\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:11 crc kubenswrapper[4125]: I0312 14:05:11.682185 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:05:12 crc kubenswrapper[4125]: I0312 14:05:12.068244 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lmrsm"] Mar 12 14:05:12 crc kubenswrapper[4125]: I0312 14:05:12.560486 4125 generic.go:334] "Generic (PLEG): container finished" podID="c72c30d0-9608-4870-b248-cc194aad7427" containerID="e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80" exitCode=0 Mar 12 14:05:12 crc kubenswrapper[4125]: I0312 14:05:12.560713 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerDied","Data":"e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80"} Mar 12 14:05:12 crc kubenswrapper[4125]: I0312 14:05:12.560740 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerStarted","Data":"dbe6b8cbb0d4146a0365430494e484c622117a00e671d9cd0391f4e14dc0d716"} Mar 12 14:05:13 crc kubenswrapper[4125]: I0312 14:05:13.570590 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerStarted","Data":"9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10"} Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.233872 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ksjx7"] Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.234454 4125 topology_manager.go:215] "Topology Admit Handler" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" podNamespace="openshift-marketplace" podName="certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.235599 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.267065 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksjx7"] Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.385707 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-catalog-content\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.385860 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-utilities\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.385905 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k58rw\" (UniqueName: \"kubernetes.io/projected/cf52c65e-5142-45c1-be45-050e3a06ab54-kube-api-access-k58rw\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.487531 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-utilities\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.487627 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-k58rw\" (UniqueName: \"kubernetes.io/projected/cf52c65e-5142-45c1-be45-050e3a06ab54-kube-api-access-k58rw\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.487700 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-catalog-content\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.488409 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-catalog-content\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.488765 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-utilities\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.529326 4125 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-k58rw\" (UniqueName: \"kubernetes.io/projected/cf52c65e-5142-45c1-be45-050e3a06ab54-kube-api-access-k58rw\") pod \"certified-operators-ksjx7\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.557074 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:26 crc kubenswrapper[4125]: I0312 14:05:26.918742 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksjx7"] Mar 12 14:05:26 crc kubenswrapper[4125]: W0312 14:05:26.939152 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf52c65e_5142_45c1_be45_050e3a06ab54.slice/crio-855e1c6302f81472477ed4fdbcd5d1f93c7d2072205b6496a4f17c41b9ceddd8 WatchSource:0}: Error finding container 855e1c6302f81472477ed4fdbcd5d1f93c7d2072205b6496a4f17c41b9ceddd8: Status 404 returned error can't find the container with id 855e1c6302f81472477ed4fdbcd5d1f93c7d2072205b6496a4f17c41b9ceddd8 Mar 12 14:05:27 crc kubenswrapper[4125]: I0312 14:05:27.692652 4125 generic.go:334] "Generic (PLEG): container finished" podID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerID="d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3" exitCode=0 Mar 12 14:05:27 crc kubenswrapper[4125]: I0312 14:05:27.693059 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerDied","Data":"d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3"} Mar 12 14:05:27 crc kubenswrapper[4125]: I0312 14:05:27.693702 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerStarted","Data":"855e1c6302f81472477ed4fdbcd5d1f93c7d2072205b6496a4f17c41b9ceddd8"} Mar 12 14:05:29 crc kubenswrapper[4125]: I0312 14:05:29.712743 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerStarted","Data":"63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05"} Mar 12 14:05:36 crc kubenswrapper[4125]: I0312 14:05:36.919100 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:05:36 crc kubenswrapper[4125]: I0312 14:05:36.919660 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:05:36 crc kubenswrapper[4125]: I0312 14:05:36.919735 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:05:36 crc kubenswrapper[4125]: I0312 14:05:36.919911 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:05:36 crc kubenswrapper[4125]: I0312 14:05:36.919962 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:05:44 crc kubenswrapper[4125]: I0312 14:05:44.988982 4125 generic.go:334] "Generic (PLEG): container finished" podID="cf52c65e-5142-45c1-be45-050e3a06ab54" 
containerID="63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05" exitCode=0 Mar 12 14:05:44 crc kubenswrapper[4125]: I0312 14:05:44.989032 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerDied","Data":"63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05"} Mar 12 14:05:46 crc kubenswrapper[4125]: I0312 14:05:45.999898 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerStarted","Data":"c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523"} Mar 12 14:05:46 crc kubenswrapper[4125]: I0312 14:05:46.040691 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ksjx7" podStartSLOduration=2.3427952420000002 podStartE2EDuration="20.040636732s" podCreationTimestamp="2026-03-12 14:05:26 +0000 UTC" firstStartedPulling="2026-03-12 14:05:27.701097423 +0000 UTC m=+2698.024483267" lastFinishedPulling="2026-03-12 14:05:45.398938778 +0000 UTC m=+2715.722324757" observedRunningTime="2026-03-12 14:05:46.040154518 +0000 UTC m=+2716.363540817" watchObservedRunningTime="2026-03-12 14:05:46.040636732 +0000 UTC m=+2716.364022631" Mar 12 14:05:46 crc kubenswrapper[4125]: I0312 14:05:46.557161 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:46 crc kubenswrapper[4125]: I0312 14:05:46.557318 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:47 crc kubenswrapper[4125]: I0312 14:05:47.697800 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-ksjx7" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="registry-server" probeResult="failure" output=< Mar 12 14:05:47 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 14:05:47 crc kubenswrapper[4125]: > Mar 12 14:05:55 crc kubenswrapper[4125]: I0312 14:05:55.058938 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerDied","Data":"9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10"} Mar 12 14:05:55 crc kubenswrapper[4125]: I0312 14:05:55.058919 4125 generic.go:334] "Generic (PLEG): container finished" podID="c72c30d0-9608-4870-b248-cc194aad7427" containerID="9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10" exitCode=0 Mar 12 14:05:56 crc kubenswrapper[4125]: I0312 14:05:56.716741 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:56 crc kubenswrapper[4125]: I0312 14:05:56.856587 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:57 crc kubenswrapper[4125]: I0312 14:05:57.084707 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerStarted","Data":"bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283"} Mar 12 14:05:57 crc kubenswrapper[4125]: I0312 14:05:57.798353 4125 
pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lmrsm" podStartSLOduration=3.858737365 podStartE2EDuration="46.798299649s" podCreationTimestamp="2026-03-12 14:05:11 +0000 UTC" firstStartedPulling="2026-03-12 14:05:12.562496667 +0000 UTC m=+2682.885882436" lastFinishedPulling="2026-03-12 14:05:55.502058691 +0000 UTC m=+2725.825444720" observedRunningTime="2026-03-12 14:05:57.793671427 +0000 UTC m=+2728.117057346" watchObservedRunningTime="2026-03-12 14:05:57.798299649 +0000 UTC m=+2728.121685758" Mar 12 14:05:57 crc kubenswrapper[4125]: I0312 14:05:57.945147 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksjx7"] Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.090960 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ksjx7" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="registry-server" containerID="cri-o://c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523" gracePeriod=2 Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.636292 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.790171 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-utilities\") pod \"cf52c65e-5142-45c1-be45-050e3a06ab54\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.790307 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-catalog-content\") pod \"cf52c65e-5142-45c1-be45-050e3a06ab54\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.790364 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k58rw\" (UniqueName: \"kubernetes.io/projected/cf52c65e-5142-45c1-be45-050e3a06ab54-kube-api-access-k58rw\") pod \"cf52c65e-5142-45c1-be45-050e3a06ab54\" (UID: \"cf52c65e-5142-45c1-be45-050e3a06ab54\") " Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.792651 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-utilities" (OuterVolumeSpecName: "utilities") pod "cf52c65e-5142-45c1-be45-050e3a06ab54" (UID: "cf52c65e-5142-45c1-be45-050e3a06ab54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.798213 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf52c65e-5142-45c1-be45-050e3a06ab54-kube-api-access-k58rw" (OuterVolumeSpecName: "kube-api-access-k58rw") pod "cf52c65e-5142-45c1-be45-050e3a06ab54" (UID: "cf52c65e-5142-45c1-be45-050e3a06ab54"). InnerVolumeSpecName "kube-api-access-k58rw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.891414 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-k58rw\" (UniqueName: \"kubernetes.io/projected/cf52c65e-5142-45c1-be45-050e3a06ab54-kube-api-access-k58rw\") on node \"crc\" DevicePath \"\"" Mar 12 14:05:58 crc kubenswrapper[4125]: I0312 14:05:58.891467 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.093141 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf52c65e-5142-45c1-be45-050e3a06ab54" (UID: "cf52c65e-5142-45c1-be45-050e3a06ab54"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.093967 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf52c65e-5142-45c1-be45-050e3a06ab54-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.099666 4125 generic.go:334] "Generic (PLEG): container finished" podID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerID="c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523" exitCode=0 Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.099732 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerDied","Data":"c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523"} Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.099765 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksjx7" event={"ID":"cf52c65e-5142-45c1-be45-050e3a06ab54","Type":"ContainerDied","Data":"855e1c6302f81472477ed4fdbcd5d1f93c7d2072205b6496a4f17c41b9ceddd8"} Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.099861 4125 scope.go:117] "RemoveContainer" containerID="c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.100028 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ksjx7" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.160628 4125 scope.go:117] "RemoveContainer" containerID="63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.207625 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksjx7"] Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.229123 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ksjx7"] Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.258804 4125 scope.go:117] "RemoveContainer" containerID="d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.292788 4125 scope.go:117] "RemoveContainer" containerID="c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523" Mar 12 14:05:59 crc kubenswrapper[4125]: E0312 14:05:59.293667 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523\": container with ID starting with c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523 not found: ID does not exist" containerID="c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.293730 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523"} err="failed to get container status \"c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523\": rpc error: code = NotFound desc = could not find container \"c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523\": container with ID starting with c2fbe5ecd0d37f1c091c03c6b78f70a7063dcc68b7ca2f371f550cbb17f05523 not found: ID does not exist" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.293744 4125 scope.go:117] "RemoveContainer" containerID="63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05" Mar 12 14:05:59 crc kubenswrapper[4125]: E0312 14:05:59.294388 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05\": container with ID starting with 63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05 not found: ID does not exist" containerID="63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.294468 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05"} err="failed to get container status \"63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05\": rpc error: code = NotFound desc = could not find container \"63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05\": container with ID starting with 63d71eec63bb1c2b5b82acf1a063b3ab02e599d1c1ba3ab970921de44e5bde05 not found: ID does not exist" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.294483 4125 scope.go:117] "RemoveContainer" containerID="d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3" Mar 12 14:05:59 crc kubenswrapper[4125]: E0312 14:05:59.295533 4125 remote_runtime.go:432] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3\": container with ID starting with d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3 not found: ID does not exist" containerID="d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3" Mar 12 14:05:59 crc kubenswrapper[4125]: I0312 14:05:59.295581 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3"} err="failed to get container status \"d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3\": rpc error: code = NotFound desc = could not find container \"d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3\": container with ID starting with d97da075c39118b39d32bde85377ce59fd865c26c43f682bae398ffac4fc3cd3 not found: ID does not exist" Mar 12 14:06:00 crc kubenswrapper[4125]: I0312 14:06:00.035409 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" path="/var/lib/kubelet/pods/cf52c65e-5142-45c1-be45-050e3a06ab54/volumes" Mar 12 14:06:01 crc kubenswrapper[4125]: I0312 14:06:01.683088 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:06:01 crc kubenswrapper[4125]: I0312 14:06:01.684992 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:06:02 crc kubenswrapper[4125]: I0312 14:06:02.794381 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lmrsm" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="registry-server" probeResult="failure" output=< Mar 12 14:06:02 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 14:06:02 crc kubenswrapper[4125]: > Mar 12 14:06:12 crc kubenswrapper[4125]: I0312 14:06:12.801138 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lmrsm" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="registry-server" probeResult="failure" output=< Mar 12 14:06:12 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 14:06:12 crc kubenswrapper[4125]: > Mar 12 14:06:21 crc kubenswrapper[4125]: I0312 14:06:21.791147 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:06:21 crc kubenswrapper[4125]: I0312 14:06:21.912187 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:06:21 crc kubenswrapper[4125]: I0312 14:06:21.975234 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lmrsm"] Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.268719 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lmrsm" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="registry-server" containerID="cri-o://bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283" gracePeriod=2 Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.701708 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.866627 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-utilities\") pod \"c72c30d0-9608-4870-b248-cc194aad7427\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.866884 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-catalog-content\") pod \"c72c30d0-9608-4870-b248-cc194aad7427\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.866964 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnlt4\" (UniqueName: \"kubernetes.io/projected/c72c30d0-9608-4870-b248-cc194aad7427-kube-api-access-cnlt4\") pod \"c72c30d0-9608-4870-b248-cc194aad7427\" (UID: \"c72c30d0-9608-4870-b248-cc194aad7427\") " Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.870521 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-utilities" (OuterVolumeSpecName: "utilities") pod "c72c30d0-9608-4870-b248-cc194aad7427" (UID: "c72c30d0-9608-4870-b248-cc194aad7427"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.873178 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c72c30d0-9608-4870-b248-cc194aad7427-kube-api-access-cnlt4" (OuterVolumeSpecName: "kube-api-access-cnlt4") pod "c72c30d0-9608-4870-b248-cc194aad7427" (UID: "c72c30d0-9608-4870-b248-cc194aad7427"). InnerVolumeSpecName "kube-api-access-cnlt4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.968790 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 14:06:23 crc kubenswrapper[4125]: I0312 14:06:23.968897 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-cnlt4\" (UniqueName: \"kubernetes.io/projected/c72c30d0-9608-4870-b248-cc194aad7427-kube-api-access-cnlt4\") on node \"crc\" DevicePath \"\"" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.277657 4125 generic.go:334] "Generic (PLEG): container finished" podID="c72c30d0-9608-4870-b248-cc194aad7427" containerID="bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283" exitCode=0 Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.277724 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerDied","Data":"bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283"} Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.277755 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lmrsm" event={"ID":"c72c30d0-9608-4870-b248-cc194aad7427","Type":"ContainerDied","Data":"dbe6b8cbb0d4146a0365430494e484c622117a00e671d9cd0391f4e14dc0d716"} Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.277793 4125 scope.go:117] "RemoveContainer" containerID="bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.278021 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lmrsm" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.341764 4125 scope.go:117] "RemoveContainer" containerID="9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.435429 4125 scope.go:117] "RemoveContainer" containerID="e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.481099 4125 scope.go:117] "RemoveContainer" containerID="bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283" Mar 12 14:06:24 crc kubenswrapper[4125]: E0312 14:06:24.483007 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283\": container with ID starting with bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283 not found: ID does not exist" containerID="bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.483072 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283"} err="failed to get container status \"bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283\": rpc error: code = NotFound desc = could not find container \"bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283\": container with ID starting with bffcdba8e85cfe894f45613fd301c6654db69af6e38f6890b224e4cf9e833283 not found: ID does not exist" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.483084 4125 scope.go:117] "RemoveContainer" containerID="9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10" Mar 12 14:06:24 crc kubenswrapper[4125]: E0312 14:06:24.483956 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10\": container with ID starting with 9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10 not found: ID does not exist" containerID="9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.483988 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10"} err="failed to get container status \"9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10\": rpc error: code = NotFound desc = could not find container \"9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10\": container with ID starting with 9fe643112f649cd0fd82e4defa119ac991ecbb9ad1609f4260d7759a5e70fb10 not found: ID does not exist" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.484001 4125 scope.go:117] "RemoveContainer" containerID="e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80" Mar 12 14:06:24 crc kubenswrapper[4125]: E0312 14:06:24.484650 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80\": container with ID starting with e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80 not found: ID does not exist" 
containerID="e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.484712 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80"} err="failed to get container status \"e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80\": rpc error: code = NotFound desc = could not find container \"e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80\": container with ID starting with e07ae15c127628866e1a757dd148acc54298f4658807836a8b20354ae9c57d80 not found: ID does not exist" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.885809 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c72c30d0-9608-4870-b248-cc194aad7427" (UID: "c72c30d0-9608-4870-b248-cc194aad7427"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:06:24 crc kubenswrapper[4125]: I0312 14:06:24.983154 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c72c30d0-9608-4870-b248-cc194aad7427-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 14:06:25 crc kubenswrapper[4125]: I0312 14:06:25.266198 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lmrsm"] Mar 12 14:06:25 crc kubenswrapper[4125]: I0312 14:06:25.273042 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lmrsm"] Mar 12 14:06:26 crc kubenswrapper[4125]: I0312 14:06:26.037165 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c72c30d0-9608-4870-b248-cc194aad7427" path="/var/lib/kubelet/pods/c72c30d0-9608-4870-b248-cc194aad7427/volumes" Mar 12 14:06:36 crc kubenswrapper[4125]: I0312 14:06:36.920335 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:06:36 crc kubenswrapper[4125]: I0312 14:06:36.920979 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:06:36 crc kubenswrapper[4125]: I0312 14:06:36.921022 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:06:36 crc kubenswrapper[4125]: I0312 14:06:36.921064 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:06:36 crc kubenswrapper[4125]: I0312 14:06:36.921082 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:07:36 crc kubenswrapper[4125]: I0312 14:07:36.921775 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:07:36 crc kubenswrapper[4125]: I0312 14:07:36.923940 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:07:36 crc kubenswrapper[4125]: I0312 14:07:36.924003 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:07:36 crc kubenswrapper[4125]: I0312 14:07:36.924025 4125 
kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:07:36 crc kubenswrapper[4125]: I0312 14:07:36.924074 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:08:36 crc kubenswrapper[4125]: I0312 14:08:36.993420 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:08:36 crc kubenswrapper[4125]: I0312 14:08:36.994341 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:08:36 crc kubenswrapper[4125]: I0312 14:08:36.994449 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:08:36 crc kubenswrapper[4125]: I0312 14:08:36.994489 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:08:36 crc kubenswrapper[4125]: I0312 14:08:36.994689 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:09:36 crc kubenswrapper[4125]: I0312 14:09:36.995715 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:09:37 crc kubenswrapper[4125]: I0312 14:09:36.996740 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:09:37 crc kubenswrapper[4125]: I0312 14:09:36.996953 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:09:37 crc kubenswrapper[4125]: I0312 14:09:36.997087 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:09:37 crc kubenswrapper[4125]: I0312 14:09:36.997127 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:10:36 crc kubenswrapper[4125]: I0312 14:10:36.998442 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:10:36 crc kubenswrapper[4125]: I0312 14:10:36.999127 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:10:36 crc kubenswrapper[4125]: I0312 14:10:36.999197 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:10:36 crc kubenswrapper[4125]: I0312 14:10:36.999226 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:10:36 crc kubenswrapper[4125]: I0312 14:10:36.999285 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:11:37 crc kubenswrapper[4125]: I0312 14:11:37.000100 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:11:37 crc kubenswrapper[4125]: I0312 14:11:37.000948 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:11:37 crc kubenswrapper[4125]: I0312 14:11:37.001060 4125 kubelet_getters.go:187] "Pod status updated" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:11:37 crc kubenswrapper[4125]: I0312 14:11:37.001090 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:11:37 crc kubenswrapper[4125]: I0312 14:11:37.001156 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:12:37 crc kubenswrapper[4125]: I0312 14:12:37.002617 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:12:37 crc kubenswrapper[4125]: I0312 14:12:37.003740 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:12:37 crc kubenswrapper[4125]: I0312 14:12:37.003946 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:12:37 crc kubenswrapper[4125]: I0312 14:12:37.004118 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:12:37 crc kubenswrapper[4125]: I0312 14:12:37.004188 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.539129 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzsd"] Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.539875 4125 topology_manager.go:215] "Topology Admit Handler" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" podNamespace="openshift-marketplace" podName="redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: E0312 14:12:44.540296 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="extract-utilities" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540314 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="extract-utilities" Mar 12 14:12:44 crc kubenswrapper[4125]: E0312 14:12:44.540336 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="extract-content" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540346 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="extract-content" Mar 12 14:12:44 crc kubenswrapper[4125]: E0312 14:12:44.540365 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="extract-utilities" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540375 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="extract-utilities" Mar 12 14:12:44 crc kubenswrapper[4125]: E0312 14:12:44.540430 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="registry-server" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540450 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="registry-server" Mar 12 14:12:44 crc kubenswrapper[4125]: E0312 14:12:44.540467 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" 
containerName="registry-server" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540479 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="registry-server" Mar 12 14:12:44 crc kubenswrapper[4125]: E0312 14:12:44.540498 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="extract-content" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540508 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="extract-content" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540726 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf52c65e-5142-45c1-be45-050e3a06ab54" containerName="registry-server" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.540752 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="c72c30d0-9608-4870-b248-cc194aad7427" containerName="registry-server" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.545259 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.581238 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzsd"] Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.610635 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqflx\" (UniqueName: \"kubernetes.io/projected/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-kube-api-access-rqflx\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.610753 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-catalog-content\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.610933 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-utilities\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.712114 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rqflx\" (UniqueName: \"kubernetes.io/projected/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-kube-api-access-rqflx\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.712226 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-catalog-content\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.712354 4125 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-utilities\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.713203 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-catalog-content\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.713245 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-utilities\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.744970 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqflx\" (UniqueName: \"kubernetes.io/projected/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-kube-api-access-rqflx\") pod \"redhat-marketplace-fqzsd\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:44 crc kubenswrapper[4125]: I0312 14:12:44.867674 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:45 crc kubenswrapper[4125]: I0312 14:12:45.506078 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzsd"] Mar 12 14:12:46 crc kubenswrapper[4125]: I0312 14:12:46.023639 4125 generic.go:334] "Generic (PLEG): container finished" podID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerID="67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376" exitCode=0 Mar 12 14:12:46 crc kubenswrapper[4125]: I0312 14:12:46.024005 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerDied","Data":"67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376"} Mar 12 14:12:46 crc kubenswrapper[4125]: I0312 14:12:46.024277 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerStarted","Data":"46e2c59399d9682a9bf0213c0cd1f613a8921e521b6da2683010518c98a66e68"} Mar 12 14:12:46 crc kubenswrapper[4125]: I0312 14:12:46.025924 4125 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 12 14:12:48 crc kubenswrapper[4125]: I0312 14:12:48.038898 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerStarted","Data":"93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce"} Mar 12 14:12:53 crc kubenswrapper[4125]: I0312 14:12:53.082013 4125 generic.go:334] "Generic (PLEG): container finished" podID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerID="93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce" exitCode=0 Mar 12 14:12:53 crc kubenswrapper[4125]: I0312 14:12:53.082645 4125 kubelet.go:2461] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerDied","Data":"93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce"} Mar 12 14:12:54 crc kubenswrapper[4125]: I0312 14:12:54.093166 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerStarted","Data":"4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8"} Mar 12 14:12:54 crc kubenswrapper[4125]: I0312 14:12:54.873231 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:54 crc kubenswrapper[4125]: I0312 14:12:54.873313 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:12:56 crc kubenswrapper[4125]: I0312 14:12:56.042795 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-fqzsd" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="registry-server" probeResult="failure" output=< Mar 12 14:12:56 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s Mar 12 14:12:56 crc kubenswrapper[4125]: > Mar 12 14:13:05 crc kubenswrapper[4125]: I0312 14:13:05.030783 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:13:05 crc kubenswrapper[4125]: I0312 14:13:05.105441 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fqzsd" podStartSLOduration=13.432823451 podStartE2EDuration="21.10526411s" podCreationTimestamp="2026-03-12 14:12:44 +0000 UTC" firstStartedPulling="2026-03-12 14:12:46.025466451 +0000 UTC m=+3136.348852210" lastFinishedPulling="2026-03-12 14:12:53.69790688 +0000 UTC m=+3144.021292869" observedRunningTime="2026-03-12 14:12:54.130130035 +0000 UTC m=+3144.453516064" watchObservedRunningTime="2026-03-12 14:13:05.10526411 +0000 UTC m=+3155.428650209" Mar 12 14:13:05 crc kubenswrapper[4125]: I0312 14:13:05.205580 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:13:05 crc kubenswrapper[4125]: I0312 14:13:05.280047 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzsd"] Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.201030 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fqzsd" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="registry-server" containerID="cri-o://4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8" gracePeriod=2 Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.821388 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.946570 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-utilities\") pod \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.946752 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqflx\" (UniqueName: \"kubernetes.io/projected/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-kube-api-access-rqflx\") pod \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.947097 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-catalog-content\") pod \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\" (UID: \"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7\") " Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.947310 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-utilities" (OuterVolumeSpecName: "utilities") pod "fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" (UID: "fc7da19a-1c6b-4ccb-9add-f21b9457a8f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.947574 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-utilities\") on node \"crc\" DevicePath \"\"" Mar 12 14:13:06 crc kubenswrapper[4125]: I0312 14:13:06.960201 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-kube-api-access-rqflx" (OuterVolumeSpecName: "kube-api-access-rqflx") pod "fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" (UID: "fc7da19a-1c6b-4ccb-9add-f21b9457a8f7"). InnerVolumeSpecName "kube-api-access-rqflx". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.054732 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rqflx\" (UniqueName: \"kubernetes.io/projected/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-kube-api-access-rqflx\") on node \"crc\" DevicePath \"\"" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.173639 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" (UID: "fc7da19a-1c6b-4ccb-9add-f21b9457a8f7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.235376 4125 generic.go:334] "Generic (PLEG): container finished" podID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerID="4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8" exitCode=0 Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.235477 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerDied","Data":"4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8"} Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.235524 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzsd" event={"ID":"fc7da19a-1c6b-4ccb-9add-f21b9457a8f7","Type":"ContainerDied","Data":"46e2c59399d9682a9bf0213c0cd1f613a8921e521b6da2683010518c98a66e68"} Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.235576 4125 scope.go:117] "RemoveContainer" containerID="4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.239906 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqzsd" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.263753 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.305307 4125 scope.go:117] "RemoveContainer" containerID="93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.326965 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzsd"] Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.327555 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzsd"] Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.353458 4125 scope.go:117] "RemoveContainer" containerID="67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.386179 4125 scope.go:117] "RemoveContainer" containerID="4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8" Mar 12 14:13:07 crc kubenswrapper[4125]: E0312 14:13:07.387183 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8\": container with ID starting with 4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8 not found: ID does not exist" containerID="4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.387596 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8"} err="failed to get container status \"4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8\": rpc error: code = NotFound desc = could not find container \"4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8\": container with ID starting with 4e239d1211ff021efd2e29a75c4c0de810a7b9187626bbbdf457b2e635dfb0e8 not found: ID does not exist" Mar 
12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.387893 4125 scope.go:117] "RemoveContainer" containerID="93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce" Mar 12 14:13:07 crc kubenswrapper[4125]: E0312 14:13:07.390205 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce\": container with ID starting with 93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce not found: ID does not exist" containerID="93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.390283 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce"} err="failed to get container status \"93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce\": rpc error: code = NotFound desc = could not find container \"93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce\": container with ID starting with 93db4d6ea428e0bcfc8b914165d9f9d5dd12c88fe4f6248c40be92c68ea911ce not found: ID does not exist" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.390344 4125 scope.go:117] "RemoveContainer" containerID="67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376" Mar 12 14:13:07 crc kubenswrapper[4125]: E0312 14:13:07.391119 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376\": container with ID starting with 67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376 not found: ID does not exist" containerID="67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376" Mar 12 14:13:07 crc kubenswrapper[4125]: I0312 14:13:07.391205 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376"} err="failed to get container status \"67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376\": rpc error: code = NotFound desc = could not find container \"67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376\": container with ID starting with 67c7ca631af00a50f26fef294ecc1d36a9625b5c072a2273bbffbe1451d65376 not found: ID does not exist" Mar 12 14:13:08 crc kubenswrapper[4125]: I0312 14:13:08.050072 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" path="/var/lib/kubelet/pods/fc7da19a-1c6b-4ccb-9add-f21b9457a8f7/volumes" Mar 12 14:13:37 crc kubenswrapper[4125]: I0312 14:13:37.005143 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:13:37 crc kubenswrapper[4125]: I0312 14:13:37.008238 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:13:37 crc kubenswrapper[4125]: I0312 14:13:37.008657 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:13:37 crc kubenswrapper[4125]: I0312 14:13:37.009141 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:13:37 crc kubenswrapper[4125]: I0312 14:13:37.009535 4125 
kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:14:37 crc kubenswrapper[4125]: I0312 14:14:37.010621 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 12 14:14:37 crc kubenswrapper[4125]: I0312 14:14:37.012016 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 12 14:14:37 crc kubenswrapper[4125]: I0312 14:14:37.012093 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 12 14:14:37 crc kubenswrapper[4125]: I0312 14:14:37.012191 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 12 14:14:37 crc kubenswrapper[4125]: I0312 14:14:37.012234 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.260109 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"] Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.261011 4125 topology_manager.go:215] "Topology Admit Handler" podUID="d1f18111-2e43-40c4-ae3c-0f02d431999d" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: E0312 14:15:00.261395 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="extract-content" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.261416 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="extract-content" Mar 12 14:15:00 crc kubenswrapper[4125]: E0312 14:15:00.261441 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="extract-utilities" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.261451 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="extract-utilities" Mar 12 14:15:00 crc kubenswrapper[4125]: E0312 14:15:00.261468 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="registry-server" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.261524 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="registry-server" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.261718 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc7da19a-1c6b-4ccb-9add-f21b9457a8f7" containerName="registry-server" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.262439 4125 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.268022 4125 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.271779 4125 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.365333 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"] Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.366185 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1f18111-2e43-40c4-ae3c-0f02d431999d-config-volume\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.366254 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1f18111-2e43-40c4-ae3c-0f02d431999d-secret-volume\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.366546 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4df6n\" (UniqueName: \"kubernetes.io/projected/d1f18111-2e43-40c4-ae3c-0f02d431999d-kube-api-access-4df6n\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.468180 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1f18111-2e43-40c4-ae3c-0f02d431999d-config-volume\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.468298 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1f18111-2e43-40c4-ae3c-0f02d431999d-secret-volume\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.468341 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4df6n\" (UniqueName: \"kubernetes.io/projected/d1f18111-2e43-40c4-ae3c-0f02d431999d-kube-api-access-4df6n\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.469598 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1f18111-2e43-40c4-ae3c-0f02d431999d-config-volume\") pod 
\"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.479044 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1f18111-2e43-40c4-ae3c-0f02d431999d-secret-volume\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.490385 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4df6n\" (UniqueName: \"kubernetes.io/projected/d1f18111-2e43-40c4-ae3c-0f02d431999d-kube-api-access-4df6n\") pod \"collect-profiles-29555415-s64ht\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.599039 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" Mar 12 14:15:00 crc kubenswrapper[4125]: I0312 14:15:00.927593 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"] Mar 12 14:15:01 crc kubenswrapper[4125]: I0312 14:15:01.485434 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" event={"ID":"d1f18111-2e43-40c4-ae3c-0f02d431999d","Type":"ContainerStarted","Data":"a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135"} Mar 12 14:15:01 crc kubenswrapper[4125]: I0312 14:15:01.485937 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" event={"ID":"d1f18111-2e43-40c4-ae3c-0f02d431999d","Type":"ContainerStarted","Data":"68b38792479d07b018f94009d1b3c7363f6571c725158371fbc98bdeb3697088"} Mar 12 14:15:01 crc kubenswrapper[4125]: I0312 14:15:01.526101 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" podStartSLOduration=1.526025904 podStartE2EDuration="1.526025904s" podCreationTimestamp="2026-03-12 14:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-12 14:15:01.517241112 +0000 UTC m=+3271.840627391" watchObservedRunningTime="2026-03-12 14:15:01.526025904 +0000 UTC m=+3271.849411983" Mar 12 14:15:02 crc kubenswrapper[4125]: I0312 14:15:02.501651 4125 generic.go:334] "Generic (PLEG): container finished" podID="d1f18111-2e43-40c4-ae3c-0f02d431999d" containerID="a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135" exitCode=0 Mar 12 14:15:02 crc kubenswrapper[4125]: I0312 14:15:02.501771 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" event={"ID":"d1f18111-2e43-40c4-ae3c-0f02d431999d","Type":"ContainerDied","Data":"a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135"} Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.708005 4125 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.827394 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4df6n\" (UniqueName: \"kubernetes.io/projected/d1f18111-2e43-40c4-ae3c-0f02d431999d-kube-api-access-4df6n\") pod \"d1f18111-2e43-40c4-ae3c-0f02d431999d\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") "
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.827553 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1f18111-2e43-40c4-ae3c-0f02d431999d-secret-volume\") pod \"d1f18111-2e43-40c4-ae3c-0f02d431999d\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") "
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.827615 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1f18111-2e43-40c4-ae3c-0f02d431999d-config-volume\") pod \"d1f18111-2e43-40c4-ae3c-0f02d431999d\" (UID: \"d1f18111-2e43-40c4-ae3c-0f02d431999d\") "
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.828543 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1f18111-2e43-40c4-ae3c-0f02d431999d-config-volume" (OuterVolumeSpecName: "config-volume") pod "d1f18111-2e43-40c4-ae3c-0f02d431999d" (UID: "d1f18111-2e43-40c4-ae3c-0f02d431999d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.833745 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1f18111-2e43-40c4-ae3c-0f02d431999d-kube-api-access-4df6n" (OuterVolumeSpecName: "kube-api-access-4df6n") pod "d1f18111-2e43-40c4-ae3c-0f02d431999d" (UID: "d1f18111-2e43-40c4-ae3c-0f02d431999d"). InnerVolumeSpecName "kube-api-access-4df6n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.838970 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1f18111-2e43-40c4-ae3c-0f02d431999d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d1f18111-2e43-40c4-ae3c-0f02d431999d" (UID: "d1f18111-2e43-40c4-ae3c-0f02d431999d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.928760 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-4df6n\" (UniqueName: \"kubernetes.io/projected/d1f18111-2e43-40c4-ae3c-0f02d431999d-kube-api-access-4df6n\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.929115 4125 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1f18111-2e43-40c4-ae3c-0f02d431999d-secret-volume\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:03 crc kubenswrapper[4125]: I0312 14:15:03.929133 4125 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1f18111-2e43-40c4-ae3c-0f02d431999d-config-volume\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:04 crc kubenswrapper[4125]: I0312 14:15:04.527187 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht" event={"ID":"d1f18111-2e43-40c4-ae3c-0f02d431999d","Type":"ContainerDied","Data":"68b38792479d07b018f94009d1b3c7363f6571c725158371fbc98bdeb3697088"}
Mar 12 14:15:04 crc kubenswrapper[4125]: I0312 14:15:04.527346 4125 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68b38792479d07b018f94009d1b3c7363f6571c725158371fbc98bdeb3697088"
Mar 12 14:15:04 crc kubenswrapper[4125]: I0312 14:15:04.529075 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"
Mar 12 14:15:04 crc kubenswrapper[4125]: I0312 14:15:04.727338 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"]
Mar 12 14:15:04 crc kubenswrapper[4125]: I0312 14:15:04.738366 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555370-8n4p8"]
Mar 12 14:15:06 crc kubenswrapper[4125]: I0312 14:15:06.053783 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="973266fa-3775-4a33-9ee8-9af757721a2a" path="/var/lib/kubelet/pods/973266fa-3775-4a33-9ee8-9af757721a2a/volumes"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.024098 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l8669"]
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.026348 4125 topology_manager.go:215] "Topology Admit Handler" podUID="78339801-b5d9-49c9-b035-b02f636f3969" podNamespace="openshift-marketplace" podName="community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: E0312 14:15:13.027043 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="d1f18111-2e43-40c4-ae3c-0f02d431999d" containerName="collect-profiles"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.027203 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f18111-2e43-40c4-ae3c-0f02d431999d" containerName="collect-profiles"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.027471 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1f18111-2e43-40c4-ae3c-0f02d431999d" containerName="collect-profiles"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.029014 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.113797 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l8669"]
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.116581 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-utilities\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.116653 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-catalog-content\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.116685 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d74rq\" (UniqueName: \"kubernetes.io/projected/78339801-b5d9-49c9-b035-b02f636f3969-kube-api-access-d74rq\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.218073 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d74rq\" (UniqueName: \"kubernetes.io/projected/78339801-b5d9-49c9-b035-b02f636f3969-kube-api-access-d74rq\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.218178 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-utilities\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.218213 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-catalog-content\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.219251 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-catalog-content\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.219450 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-utilities\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.250284 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-d74rq\" (UniqueName: \"kubernetes.io/projected/78339801-b5d9-49c9-b035-b02f636f3969-kube-api-access-d74rq\") pod \"community-operators-l8669\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") " pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.358271 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:13 crc kubenswrapper[4125]: I0312 14:15:13.735040 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l8669"]
Mar 12 14:15:13 crc kubenswrapper[4125]: W0312 14:15:13.747384 4125 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78339801_b5d9_49c9_b035_b02f636f3969.slice/crio-d45624b43f4023f9c1b525a7b6100674a1dedf11ffb173d240e4738d9b8dfd03 WatchSource:0}: Error finding container d45624b43f4023f9c1b525a7b6100674a1dedf11ffb173d240e4738d9b8dfd03: Status 404 returned error can't find the container with id d45624b43f4023f9c1b525a7b6100674a1dedf11ffb173d240e4738d9b8dfd03
Mar 12 14:15:14 crc kubenswrapper[4125]: I0312 14:15:14.629542 4125 generic.go:334] "Generic (PLEG): container finished" podID="78339801-b5d9-49c9-b035-b02f636f3969" containerID="66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1" exitCode=0
Mar 12 14:15:14 crc kubenswrapper[4125]: I0312 14:15:14.629601 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerDied","Data":"66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1"}
Mar 12 14:15:14 crc kubenswrapper[4125]: I0312 14:15:14.629633 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerStarted","Data":"d45624b43f4023f9c1b525a7b6100674a1dedf11ffb173d240e4738d9b8dfd03"}
Mar 12 14:15:15 crc kubenswrapper[4125]: I0312 14:15:15.639600 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerStarted","Data":"ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5"}
Mar 12 14:15:31 crc kubenswrapper[4125]: I0312 14:15:31.182793 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerDied","Data":"ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5"}
Mar 12 14:15:31 crc kubenswrapper[4125]: I0312 14:15:31.182794 4125 generic.go:334] "Generic (PLEG): container finished" podID="78339801-b5d9-49c9-b035-b02f636f3969" containerID="ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5" exitCode=0
Mar 12 14:15:33 crc kubenswrapper[4125]: I0312 14:15:33.241930 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerStarted","Data":"2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7"}
Mar 12 14:15:33 crc kubenswrapper[4125]: I0312 14:15:33.283272 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l8669" podStartSLOduration=4.417048609 podStartE2EDuration="21.283209256s" podCreationTimestamp="2026-03-12 14:15:12 +0000 UTC" firstStartedPulling="2026-03-12 14:15:14.63325882 +0000 UTC m=+3284.956644589" lastFinishedPulling="2026-03-12 14:15:31.499419327 +0000 UTC m=+3301.822805236" observedRunningTime="2026-03-12 14:15:33.276574269 +0000 UTC m=+3303.599960738" watchObservedRunningTime="2026-03-12 14:15:33.283209256 +0000 UTC m=+3303.606595345"
Mar 12 14:15:33 crc kubenswrapper[4125]: I0312 14:15:33.361447 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:33 crc kubenswrapper[4125]: I0312 14:15:33.363135 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:34 crc kubenswrapper[4125]: I0312 14:15:34.484586 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l8669" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="registry-server" probeResult="failure" output=<
Mar 12 14:15:34 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 14:15:34 crc kubenswrapper[4125]: >
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.642370 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-drzhl"]
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.649065 4125 topology_manager.go:215] "Topology Admit Handler" podUID="140f3c78-7910-453f-9137-e39996e73632" podNamespace="openshift-marketplace" podName="certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.661734 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.682298 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-drzhl"]
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.758995 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkhps\" (UniqueName: \"kubernetes.io/projected/140f3c78-7910-453f-9137-e39996e73632-kube-api-access-wkhps\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.759058 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-catalog-content\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.759085 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-utilities\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.860993 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-utilities\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.861128 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wkhps\" (UniqueName: \"kubernetes.io/projected/140f3c78-7910-453f-9137-e39996e73632-kube-api-access-wkhps\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.861164 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-catalog-content\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.861692 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-utilities\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.861771 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-catalog-content\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:35 crc kubenswrapper[4125]: I0312 14:15:35.893522 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkhps\" (UniqueName: \"kubernetes.io/projected/140f3c78-7910-453f-9137-e39996e73632-kube-api-access-wkhps\") pod \"certified-operators-drzhl\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") " pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:36 crc kubenswrapper[4125]: I0312 14:15:36.066022 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.012894 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.013163 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.013219 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.013251 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.013280 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.159655 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-drzhl"]
Mar 12 14:15:37 crc kubenswrapper[4125]: I0312 14:15:37.273662 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerStarted","Data":"a057aee86bbf2e2602f7d5f409b36893b48299f23e09bf6bff23c269406a7793"}
Mar 12 14:15:38 crc kubenswrapper[4125]: I0312 14:15:38.282000 4125 generic.go:334] "Generic (PLEG): container finished" podID="140f3c78-7910-453f-9137-e39996e73632" containerID="d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621" exitCode=0
Mar 12 14:15:38 crc kubenswrapper[4125]: I0312 14:15:38.282160 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerDied","Data":"d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621"}
Mar 12 14:15:39 crc kubenswrapper[4125]: I0312 14:15:39.294576 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerStarted","Data":"d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389"}
Mar 12 14:15:44 crc kubenswrapper[4125]: I0312 14:15:44.503952 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l8669" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="registry-server" probeResult="failure" output=<
Mar 12 14:15:44 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 14:15:44 crc kubenswrapper[4125]: >
Mar 12 14:15:48 crc kubenswrapper[4125]: I0312 14:15:48.373714 4125 generic.go:334] "Generic (PLEG): container finished" podID="140f3c78-7910-453f-9137-e39996e73632" containerID="d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389" exitCode=0
Mar 12 14:15:48 crc kubenswrapper[4125]: I0312 14:15:48.373944 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerDied","Data":"d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389"}
Mar 12 14:15:49 crc kubenswrapper[4125]: I0312 14:15:49.384484 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerStarted","Data":"388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1"}
Mar 12 14:15:49 crc kubenswrapper[4125]: I0312 14:15:49.428707 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-drzhl" podStartSLOduration=3.919654102 podStartE2EDuration="14.42864185s" podCreationTimestamp="2026-03-12 14:15:35 +0000 UTC" firstStartedPulling="2026-03-12 14:15:38.285131246 +0000 UTC m=+3308.608517005" lastFinishedPulling="2026-03-12 14:15:48.794118144 +0000 UTC m=+3319.117504753" observedRunningTime="2026-03-12 14:15:49.423103028 +0000 UTC m=+3319.746488877" watchObservedRunningTime="2026-03-12 14:15:49.42864185 +0000 UTC m=+3319.752027909"
Mar 12 14:15:53 crc kubenswrapper[4125]: I0312 14:15:53.490801 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:53 crc kubenswrapper[4125]: I0312 14:15:53.620374 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:53 crc kubenswrapper[4125]: I0312 14:15:53.691353 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l8669"]
Mar 12 14:15:55 crc kubenswrapper[4125]: I0312 14:15:55.443051 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l8669" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="registry-server" containerID="cri-o://2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7" gracePeriod=2
Mar 12 14:15:55 crc kubenswrapper[4125]: I0312 14:15:55.921036 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.016127 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d74rq\" (UniqueName: \"kubernetes.io/projected/78339801-b5d9-49c9-b035-b02f636f3969-kube-api-access-d74rq\") pod \"78339801-b5d9-49c9-b035-b02f636f3969\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") "
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.016365 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-utilities\") pod \"78339801-b5d9-49c9-b035-b02f636f3969\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") "
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.016423 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-catalog-content\") pod \"78339801-b5d9-49c9-b035-b02f636f3969\" (UID: \"78339801-b5d9-49c9-b035-b02f636f3969\") "
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.017475 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-utilities" (OuterVolumeSpecName: "utilities") pod "78339801-b5d9-49c9-b035-b02f636f3969" (UID: "78339801-b5d9-49c9-b035-b02f636f3969"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.026585 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78339801-b5d9-49c9-b035-b02f636f3969-kube-api-access-d74rq" (OuterVolumeSpecName: "kube-api-access-d74rq") pod "78339801-b5d9-49c9-b035-b02f636f3969" (UID: "78339801-b5d9-49c9-b035-b02f636f3969"). InnerVolumeSpecName "kube-api-access-d74rq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.068394 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.068781 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.118265 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.118378 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-d74rq\" (UniqueName: \"kubernetes.io/projected/78339801-b5d9-49c9-b035-b02f636f3969-kube-api-access-d74rq\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.213848 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.320345 4125 scope.go:117] "RemoveContainer" containerID="0260ff3055ca485655dfa3a62ea6cb1042f69d0c1b0e8a1d75401c5004d0a556"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.459450 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l8669"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.460238 4125 generic.go:334] "Generic (PLEG): container finished" podID="78339801-b5d9-49c9-b035-b02f636f3969" containerID="2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7" exitCode=0
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.460582 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerDied","Data":"2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7"}
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.460645 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l8669" event={"ID":"78339801-b5d9-49c9-b035-b02f636f3969","Type":"ContainerDied","Data":"d45624b43f4023f9c1b525a7b6100674a1dedf11ffb173d240e4738d9b8dfd03"}
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.460685 4125 scope.go:117] "RemoveContainer" containerID="2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.545384 4125 scope.go:117] "RemoveContainer" containerID="ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.645391 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.667613 4125 scope.go:117] "RemoveContainer" containerID="66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.721065 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-drzhl"]
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.725356 4125 scope.go:117] "RemoveContainer" containerID="2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7"
Mar 12 14:15:56 crc kubenswrapper[4125]: E0312 14:15:56.726194 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7\": container with ID starting with 2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7 not found: ID does not exist" containerID="2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.726280 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7"} err="failed to get container status \"2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7\": rpc error: code = NotFound desc = could not find container \"2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7\": container with ID starting with 2844f7d378003d657e563fef517e5b479d4f3b31e0ced3c0dbd886c242a6b2d7 not found: ID does not exist"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.726302 4125 scope.go:117] "RemoveContainer" containerID="ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5"
Mar 12 14:15:56 crc kubenswrapper[4125]: E0312 14:15:56.727426 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5\": container with ID starting with ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5 not found: ID does not exist" containerID="ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.727455 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5"} err="failed to get container status \"ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5\": rpc error: code = NotFound desc = could not find container \"ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5\": container with ID starting with ce14ba4efbc543f2aa9dbfa3eaa8133cee4405df9772e42c67573ec3a4f817e5 not found: ID does not exist"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.727464 4125 scope.go:117] "RemoveContainer" containerID="66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1"
Mar 12 14:15:56 crc kubenswrapper[4125]: E0312 14:15:56.727930 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1\": container with ID starting with 66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1 not found: ID does not exist" containerID="66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.727951 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1"} err="failed to get container status \"66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1\": rpc error: code = NotFound desc = could not find container \"66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1\": container with ID starting with 66ddb54a07eb952a59a2e17e1cb0ac51f85b8bbc2691979d70adc1d1524ac4a1 not found: ID does not exist"
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.853897 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "78339801-b5d9-49c9-b035-b02f636f3969" (UID: "78339801-b5d9-49c9-b035-b02f636f3969"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:15:56 crc kubenswrapper[4125]: I0312 14:15:56.930932 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78339801-b5d9-49c9-b035-b02f636f3969-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:57 crc kubenswrapper[4125]: I0312 14:15:57.134624 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l8669"]
Mar 12 14:15:57 crc kubenswrapper[4125]: I0312 14:15:57.148734 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l8669"]
Mar 12 14:15:58 crc kubenswrapper[4125]: I0312 14:15:58.035454 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78339801-b5d9-49c9-b035-b02f636f3969" path="/var/lib/kubelet/pods/78339801-b5d9-49c9-b035-b02f636f3969/volumes"
Mar 12 14:15:58 crc kubenswrapper[4125]: I0312 14:15:58.489157 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-drzhl" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="registry-server" containerID="cri-o://388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1" gracePeriod=2
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.001297 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.173443 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-utilities\") pod \"140f3c78-7910-453f-9137-e39996e73632\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") "
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.173558 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkhps\" (UniqueName: \"kubernetes.io/projected/140f3c78-7910-453f-9137-e39996e73632-kube-api-access-wkhps\") pod \"140f3c78-7910-453f-9137-e39996e73632\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") "
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.173708 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-catalog-content\") pod \"140f3c78-7910-453f-9137-e39996e73632\" (UID: \"140f3c78-7910-453f-9137-e39996e73632\") "
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.174749 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-utilities" (OuterVolumeSpecName: "utilities") pod "140f3c78-7910-453f-9137-e39996e73632" (UID: "140f3c78-7910-453f-9137-e39996e73632"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.175922 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.183567 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/140f3c78-7910-453f-9137-e39996e73632-kube-api-access-wkhps" (OuterVolumeSpecName: "kube-api-access-wkhps") pod "140f3c78-7910-453f-9137-e39996e73632" (UID: "140f3c78-7910-453f-9137-e39996e73632"). InnerVolumeSpecName "kube-api-access-wkhps". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.277020 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wkhps\" (UniqueName: \"kubernetes.io/projected/140f3c78-7910-453f-9137-e39996e73632-kube-api-access-wkhps\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.503321 4125 generic.go:334] "Generic (PLEG): container finished" podID="140f3c78-7910-453f-9137-e39996e73632" containerID="388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1" exitCode=0
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.503438 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerDied","Data":"388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1"}
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.503496 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drzhl" event={"ID":"140f3c78-7910-453f-9137-e39996e73632","Type":"ContainerDied","Data":"a057aee86bbf2e2602f7d5f409b36893b48299f23e09bf6bff23c269406a7793"}
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.503608 4125 scope.go:117] "RemoveContainer" containerID="388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.504443 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drzhl"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.510411 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "140f3c78-7910-453f-9137-e39996e73632" (UID: "140f3c78-7910-453f-9137-e39996e73632"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.580399 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/140f3c78-7910-453f-9137-e39996e73632-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.597185 4125 scope.go:117] "RemoveContainer" containerID="d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.672437 4125 scope.go:117] "RemoveContainer" containerID="d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.710297 4125 scope.go:117] "RemoveContainer" containerID="388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1"
Mar 12 14:15:59 crc kubenswrapper[4125]: E0312 14:15:59.710875 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1\": container with ID starting with 388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1 not found: ID does not exist" containerID="388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.710939 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1"} err="failed to get container status \"388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1\": rpc error: code = NotFound desc = could not find container \"388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1\": container with ID starting with 388a18f39b0fbf5a4f224bd762b805ef890dd14f8f66272b94e53f88116f96d1 not found: ID does not exist"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.710955 4125 scope.go:117] "RemoveContainer" containerID="d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389"
Mar 12 14:15:59 crc kubenswrapper[4125]: E0312 14:15:59.712706 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389\": container with ID starting with d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389 not found: ID does not exist" containerID="d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.712738 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389"} err="failed to get container status \"d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389\": rpc error: code = NotFound desc = could not find container \"d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389\": container with ID starting with d594ecdff6091453fa0f87673057a1d45274b314260b0a39aa4e7e4fc3b33389 not found: ID does not exist"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.712802 4125 scope.go:117] "RemoveContainer" containerID="d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621"
Mar 12 14:15:59 crc kubenswrapper[4125]: E0312 14:15:59.714172 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621\": container with ID starting with d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621 not found: ID does not exist" containerID="d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.714225 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621"} err="failed to get container status \"d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621\": rpc error: code = NotFound desc = could not find container \"d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621\": container with ID starting with d6693fd339fe39f8132e197550f978f1a54dda1b2d90d148cf0ad13bc0c96621 not found: ID does not exist"
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.856376 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-drzhl"]
Mar 12 14:15:59 crc kubenswrapper[4125]: I0312 14:15:59.861352 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-drzhl"]
Mar 12 14:16:00 crc kubenswrapper[4125]: I0312 14:16:00.033071 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="140f3c78-7910-453f-9137-e39996e73632" path="/var/lib/kubelet/pods/140f3c78-7910-453f-9137-e39996e73632/volumes"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.429661 4125 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l6pks"]
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.430024 4125 topology_manager.go:215] "Topology Admit Handler" podUID="70ef642a-e60f-41b1-86b4-646b605a4307" podNamespace="openshift-marketplace" podName="redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: E0312 14:16:02.433685 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="extract-utilities"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.433911 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="extract-utilities"
Mar 12 14:16:02 crc kubenswrapper[4125]: E0312 14:16:02.433986 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="extract-utilities"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.434011 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="extract-utilities"
Mar 12 14:16:02 crc kubenswrapper[4125]: E0312 14:16:02.434062 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="registry-server"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.434079 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="registry-server"
Mar 12 14:16:02 crc kubenswrapper[4125]: E0312 14:16:02.434114 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="registry-server"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.434130 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="registry-server"
Mar 12 14:16:02 crc kubenswrapper[4125]: E0312 14:16:02.434182 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="extract-content"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.434198 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="extract-content"
Mar 12 14:16:02 crc kubenswrapper[4125]: E0312 14:16:02.434231 4125 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="extract-content"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.434247 4125 state_mem.go:107] "Deleted CPUSet assignment" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="extract-content"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.435050 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="78339801-b5d9-49c9-b035-b02f636f3969" containerName="registry-server"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.435099 4125 memory_manager.go:354] "RemoveStaleState removing state" podUID="140f3c78-7910-453f-9137-e39996e73632" containerName="registry-server"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.439446 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.477309 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l6pks"]
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.531497 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-catalog-content\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.531721 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5vxj\" (UniqueName: \"kubernetes.io/projected/70ef642a-e60f-41b1-86b4-646b605a4307-kube-api-access-l5vxj\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.531772 4125 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-utilities\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.634413 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-catalog-content\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.634541 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l5vxj\" (UniqueName: \"kubernetes.io/projected/70ef642a-e60f-41b1-86b4-646b605a4307-kube-api-access-l5vxj\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.634577 4125 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-utilities\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.635203 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-utilities\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.635384 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-catalog-content\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.658041 4125 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5vxj\" (UniqueName: \"kubernetes.io/projected/70ef642a-e60f-41b1-86b4-646b605a4307-kube-api-access-l5vxj\") pod \"redhat-operators-l6pks\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") " pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:02 crc kubenswrapper[4125]: I0312 14:16:02.776421 4125 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:03 crc kubenswrapper[4125]: I0312 14:16:03.356770 4125 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l6pks"]
Mar 12 14:16:03 crc kubenswrapper[4125]: I0312 14:16:03.545479 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerStarted","Data":"08e88ff8ef0d27af4d28f0c297c61a886f69dca1324c17339536f5a103cbe685"}
Mar 12 14:16:04 crc kubenswrapper[4125]: I0312 14:16:04.554623 4125 generic.go:334] "Generic (PLEG): container finished" podID="70ef642a-e60f-41b1-86b4-646b605a4307" containerID="d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e" exitCode=0
Mar 12 14:16:04 crc kubenswrapper[4125]: I0312 14:16:04.554676 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerDied","Data":"d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e"}
Mar 12 14:16:05 crc kubenswrapper[4125]: I0312 14:16:05.570023 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerStarted","Data":"5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3"}
Mar 12 14:16:12 crc kubenswrapper[4125]: I0312 14:16:12.366632 4125 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" containerName="hostpath-provisioner" probeResult="failure" output="Get \"http://10.217.0.49:9898/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Mar 12 14:16:37 crc kubenswrapper[4125]: I0312 14:16:37.013945 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:16:37 crc kubenswrapper[4125]: I0312 14:16:37.014966 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:16:37 crc kubenswrapper[4125]: I0312 14:16:37.015039 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:16:37 crc kubenswrapper[4125]: I0312 14:16:37.015114 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:16:37 crc kubenswrapper[4125]: I0312 14:16:37.015155 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:16:42 crc kubenswrapper[4125]: I0312 14:16:42.436091 4125 generic.go:334] "Generic (PLEG): container finished" podID="70ef642a-e60f-41b1-86b4-646b605a4307" containerID="5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3" exitCode=0
Mar 12 14:16:42 crc kubenswrapper[4125]: I0312 14:16:42.436425 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerDied","Data":"5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3"}
Mar 12 14:16:44 crc kubenswrapper[4125]: I0312 14:16:44.455358 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerStarted","Data":"bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db"}
Mar 12 14:16:44 crc kubenswrapper[4125]: I0312 14:16:44.484157 4125 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l6pks" podStartSLOduration=4.133282487 podStartE2EDuration="42.484105309s" podCreationTimestamp="2026-03-12 14:16:02 +0000 UTC" firstStartedPulling="2026-03-12 14:16:04.558511387 +0000 UTC m=+3334.881897156" lastFinishedPulling="2026-03-12 14:16:42.909334209 +0000 UTC m=+3373.232719978" observedRunningTime="2026-03-12 14:16:44.481381765 +0000 UTC m=+3374.804767684" watchObservedRunningTime="2026-03-12 14:16:44.484105309 +0000 UTC m=+3374.807491208"
Mar 12 14:16:52 crc kubenswrapper[4125]: I0312 14:16:52.777583 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:52 crc kubenswrapper[4125]: I0312 14:16:52.778242 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:16:53 crc kubenswrapper[4125]: I0312 14:16:53.902730 4125 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l6pks" podUID="70ef642a-e60f-41b1-86b4-646b605a4307" containerName="registry-server" probeResult="failure" output=<
Mar 12 14:16:53 crc kubenswrapper[4125]: timeout: failed to connect service ":50051" within 1s
Mar 12 14:16:53 crc kubenswrapper[4125]: >
Mar 12 14:17:02 crc kubenswrapper[4125]: I0312 14:17:02.919455 4125 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:17:03 crc kubenswrapper[4125]: I0312 14:17:03.046264 4125 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:17:03 crc kubenswrapper[4125]: I0312 14:17:03.124790 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l6pks"]
Mar 12 14:17:04 crc kubenswrapper[4125]: I0312 14:17:04.574106 4125 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l6pks" podUID="70ef642a-e60f-41b1-86b4-646b605a4307" containerName="registry-server" containerID="cri-o://bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db" gracePeriod=2
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.358675 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.442327 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-utilities" (OuterVolumeSpecName: "utilities") pod "70ef642a-e60f-41b1-86b4-646b605a4307" (UID: "70ef642a-e60f-41b1-86b4-646b605a4307"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.442854 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-utilities\") pod \"70ef642a-e60f-41b1-86b4-646b605a4307\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") "
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.442974 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5vxj\" (UniqueName: \"kubernetes.io/projected/70ef642a-e60f-41b1-86b4-646b605a4307-kube-api-access-l5vxj\") pod \"70ef642a-e60f-41b1-86b4-646b605a4307\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") "
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.443021 4125 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-catalog-content\") pod \"70ef642a-e60f-41b1-86b4-646b605a4307\" (UID: \"70ef642a-e60f-41b1-86b4-646b605a4307\") "
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.443476 4125 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-utilities\") on node \"crc\" DevicePath \"\""
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.462210 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70ef642a-e60f-41b1-86b4-646b605a4307-kube-api-access-l5vxj" (OuterVolumeSpecName: "kube-api-access-l5vxj") pod "70ef642a-e60f-41b1-86b4-646b605a4307" (UID: "70ef642a-e60f-41b1-86b4-646b605a4307"). InnerVolumeSpecName "kube-api-access-l5vxj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.544562 4125 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-l5vxj\" (UniqueName: \"kubernetes.io/projected/70ef642a-e60f-41b1-86b4-646b605a4307-kube-api-access-l5vxj\") on node \"crc\" DevicePath \"\""
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.581464 4125 generic.go:334] "Generic (PLEG): container finished" podID="70ef642a-e60f-41b1-86b4-646b605a4307" containerID="bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db" exitCode=0
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.581536 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerDied","Data":"bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db"}
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.581603 4125 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l6pks" event={"ID":"70ef642a-e60f-41b1-86b4-646b605a4307","Type":"ContainerDied","Data":"08e88ff8ef0d27af4d28f0c297c61a886f69dca1324c17339536f5a103cbe685"}
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.581655 4125 scope.go:117] "RemoveContainer" containerID="bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.581902 4125 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l6pks"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.645395 4125 scope.go:117] "RemoveContainer" containerID="5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.732289 4125 scope.go:117] "RemoveContainer" containerID="d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.789779 4125 scope.go:117] "RemoveContainer" containerID="bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db"
Mar 12 14:17:05 crc kubenswrapper[4125]: E0312 14:17:05.795756 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db\": container with ID starting with bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db not found: ID does not exist" containerID="bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.795908 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db"} err="failed to get container status \"bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db\": rpc error: code = NotFound desc = could not find container \"bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db\": container with ID starting with bfa6f49e45624de088f1831563e8e360261045da8cfa375b86ac318a46a7a2db not found: ID does not exist"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.795926 4125 scope.go:117] "RemoveContainer" containerID="5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3"
Mar 12 14:17:05 crc kubenswrapper[4125]: E0312 14:17:05.797083 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3\": container with ID starting with 5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3 not found: ID does not exist" containerID="5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.797191 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3"} err="failed to get container status \"5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3\": rpc error: code = NotFound desc = could not find container \"5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3\": container with ID starting with 5b52a0a04e1b15dbd91f37cd98f46295c484264ed9bac4eaa28351591482ccc3 not found: ID does not exist"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.797221 4125 scope.go:117] "RemoveContainer" containerID="d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e"
Mar 12 14:17:05 crc kubenswrapper[4125]: E0312 14:17:05.798011 4125 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e\": container with ID starting with d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e not found: ID does not exist" containerID="d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e"
Mar 12 14:17:05 crc kubenswrapper[4125]: I0312 14:17:05.798089 4125 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e"} err="failed to get container status \"d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e\": rpc error: code = NotFound desc = could not find container \"d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e\": container with ID starting with d4a50a918c9dd34ebabfe2b0c3993e265e9fccf516ff7fccc73c73b2c5b1906e not found: ID does not exist"
Mar 12 14:17:06 crc kubenswrapper[4125]: I0312 14:17:06.504154 4125 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70ef642a-e60f-41b1-86b4-646b605a4307" (UID: "70ef642a-e60f-41b1-86b4-646b605a4307"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 12 14:17:06 crc kubenswrapper[4125]: I0312 14:17:06.560933 4125 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ef642a-e60f-41b1-86b4-646b605a4307-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 12 14:17:06 crc kubenswrapper[4125]: I0312 14:17:06.877297 4125 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l6pks"]
Mar 12 14:17:06 crc kubenswrapper[4125]: I0312 14:17:06.887183 4125 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l6pks"]
Mar 12 14:17:08 crc kubenswrapper[4125]: I0312 14:17:08.033792 4125 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70ef642a-e60f-41b1-86b4-646b605a4307" path="/var/lib/kubelet/pods/70ef642a-e60f-41b1-86b4-646b605a4307/volumes"
Mar 12 14:17:37 crc kubenswrapper[4125]: I0312 14:17:37.021012 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:17:37 crc kubenswrapper[4125]: I0312 14:17:37.021858 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:17:37 crc kubenswrapper[4125]: I0312 14:17:37.021917 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:17:37 crc kubenswrapper[4125]: I0312 14:17:37.021980 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:17:37 crc kubenswrapper[4125]: I0312 14:17:37.022006 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:18:37 crc kubenswrapper[4125]: I0312 14:18:37.022856 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 12 14:18:37 crc kubenswrapper[4125]: I0312 14:18:37.023474 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 12 14:18:37 crc kubenswrapper[4125]: I0312 14:18:37.023536 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 12 14:18:37 crc kubenswrapper[4125]: I0312 14:18:37.023664 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 12 14:18:37 crc kubenswrapper[4125]: I0312 14:18:37.023696 4125 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 12 14:18:58 crc kubenswrapper[4125]: I0312 14:18:58.745447 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt"
Mar 12 14:18:59 crc kubenswrapper[4125]: I0312 14:18:59.391345 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt"
Mar 12 14:19:01 crc kubenswrapper[4125]: I0312 14:19:01.997110 4125 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch:
/etc/kubernetes/kubelet-ca.crt" Mar 12 14:19:19 crc kubenswrapper[4125]: I0312 14:19:19.388001 4125 dynamic_cafile_content.go:171] "Shutting down controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Mar 12 14:19:19 crc systemd[1]: Stopping Kubernetes Kubelet... Mar 12 14:19:19 crc systemd[1]: kubelet.service: Deactivated successfully. Mar 12 14:19:19 crc systemd[1]: Stopped Kubernetes Kubelet. Mar 12 14:19:19 crc systemd[1]: kubelet.service: Consumed 11min 51.601s CPU time. -- Boot 66090dc1f2124b3ea31df5899cfbb0f4 -- Mar 20 15:19:39 crc systemd[1]: Starting Kubernetes Kubelet... Mar 20 15:19:40 crc kubenswrapper[3014]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Mar 20 15:19:40 crc kubenswrapper[3014]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Mar 20 15:19:40 crc kubenswrapper[3014]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Mar 20 15:19:40 crc kubenswrapper[3014]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Mar 20 15:19:40 crc kubenswrapper[3014]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Mar 20 15:19:40 crc kubenswrapper[3014]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.242055 3014 server.go:204] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246781 3014 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246819 3014 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246833 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246846 3014 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246858 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246870 3014 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246881 3014 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246895 3014 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246906 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246918 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246930 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246941 3014 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246952 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246964 3014 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246975 3014 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246986 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.246997 3014 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247008 3014 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247019 3014 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247030 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247041 3014 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247052 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247064 3014 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247077 3014 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247091 3014 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247123 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247136 3014 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247147 3014 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247158 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247170 3014 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247181 3014 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247192 3014 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247206 3014 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247221 3014 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247236 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247252 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247267 3014 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247283 3014 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247301 3014 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247317 3014 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247367 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247382 3014 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247396 3014 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247411 3014 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247425 3014 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247439 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247453 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247467 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247483 3014 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247499 3014 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247515 3014 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247530 3014 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247545 3014 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247562 3014 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247578 3014 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247592 3014 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247608 3014 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247624 3014 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247639 3014 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.247650 3014 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247775 3014 flags.go:64] FLAG: --address="0.0.0.0"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247803 3014 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247817 3014 flags.go:64] FLAG: --anonymous-auth="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247828 3014 flags.go:64] FLAG: --application-metrics-count-limit="100"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247840 3014 flags.go:64] FLAG: --authentication-token-webhook="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247850 3014 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247862 3014 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247874 3014 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247884 3014 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247894 3014 flags.go:64] FLAG: --azure-container-registry-config=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247904 3014 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247914 3014 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247924 3014 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247934 3014 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247948 3014 flags.go:64] FLAG: --cgroup-root=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247959 3014 flags.go:64] FLAG: --cgroups-per-qos="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247970 3014 flags.go:64] FLAG: --client-ca-file=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247980 3014 flags.go:64] FLAG: --cloud-config=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247990 3014 flags.go:64] FLAG: --cloud-provider=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.247999 3014 flags.go:64] FLAG: --cluster-dns="[]"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248012 3014 flags.go:64] FLAG: --cluster-domain=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248021 3014 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248031 3014 flags.go:64] FLAG: --config-dir=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248040 3014 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248050 3014 flags.go:64] FLAG: --container-log-max-files="5"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248061 3014 flags.go:64] FLAG: --container-log-max-size="10Mi"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248071 3014 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248081 3014 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248091 3014 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248100 3014 flags.go:64] FLAG: --contention-profiling="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248109 3014 flags.go:64] FLAG: --cpu-cfs-quota="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248119 3014 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248128 3014 flags.go:64] FLAG: --cpu-manager-policy="none"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248137 3014 flags.go:64] FLAG: --cpu-manager-policy-options=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248148 3014 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248158 3014 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248168 3014 flags.go:64] FLAG: --enable-debugging-handlers="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248178 3014 flags.go:64] FLAG: --enable-load-reader="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248187 3014 flags.go:64] FLAG: --enable-server="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248197 3014 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248212 3014 flags.go:64] FLAG: --event-burst="100"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248225 3014 flags.go:64] FLAG: --event-qps="50"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248238 3014 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248250 3014 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248262 3014 flags.go:64] FLAG: --eviction-hard=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248276 3014 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248289 3014 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248301 3014 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248313 3014 flags.go:64] FLAG: --eviction-soft=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248359 3014 flags.go:64] FLAG: --eviction-soft-grace-period=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248375 3014 flags.go:64] FLAG: --exit-on-lock-contention="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248387 3014 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248400 3014 flags.go:64] FLAG: --experimental-mounter-path=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248414 3014 flags.go:64] FLAG: --fail-swap-on="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248425 3014 flags.go:64] FLAG: --feature-gates=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248437 3014 flags.go:64] FLAG: --file-check-frequency="20s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248447 3014 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248457 3014 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248467 3014 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248476 3014 flags.go:64] FLAG: --healthz-port="10248"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248486 3014 flags.go:64] FLAG: --help="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248495 3014 flags.go:64] FLAG: --hostname-override=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248504 3014 flags.go:64] FLAG: --housekeeping-interval="10s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248513 3014 flags.go:64] FLAG: --http-check-frequency="20s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248522 3014 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248531 3014 flags.go:64] FLAG: --image-credential-provider-config=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248540 3014 flags.go:64] FLAG: --image-gc-high-threshold="85"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248549 3014 flags.go:64] FLAG: --image-gc-low-threshold="80"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248559 3014 flags.go:64] FLAG: --image-service-endpoint=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248568 3014 flags.go:64] FLAG: --iptables-drop-bit="15"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248577 3014 flags.go:64] FLAG: --iptables-masquerade-bit="14"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248585 3014 flags.go:64] FLAG: --keep-terminated-pod-volumes="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248595 3014 flags.go:64] FLAG: --kernel-memcg-notification="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248604 3014 flags.go:64] FLAG: --kube-api-burst="100"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248613 3014 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248623 3014 flags.go:64] FLAG: --kube-api-qps="50"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248631 3014 flags.go:64] FLAG: --kube-reserved=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248640 3014 flags.go:64] FLAG: --kube-reserved-cgroup=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248649 3014 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248659 3014 flags.go:64] FLAG: --kubelet-cgroups=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248668 3014 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248677 3014 flags.go:64] FLAG: --lock-file=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248685 3014 flags.go:64] FLAG: --log-cadvisor-usage="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248695 3014 flags.go:64] FLAG: --log-flush-frequency="5s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248704 3014 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248717 3014 flags.go:64] FLAG: --log-json-split-stream="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248727 3014 flags.go:64] FLAG: --logging-format="text"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248736 3014 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248746 3014 flags.go:64] FLAG: --make-iptables-util-chains="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248757 3014 flags.go:64] FLAG: --manifest-url=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248767 3014 flags.go:64] FLAG: --manifest-url-header=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248778 3014 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248788 3014 flags.go:64] FLAG: --max-open-files="1000000"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248798 3014 flags.go:64] FLAG: --max-pods="110"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248808 3014 flags.go:64] FLAG: --maximum-dead-containers="-1"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248817 3014 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248827 3014 flags.go:64] FLAG: --memory-manager-policy="None"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248835 3014 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248845 3014 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248854 3014 flags.go:64] FLAG: --node-ip="192.168.126.11"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248864 3014 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248884 3014 flags.go:64] FLAG: --node-status-max-images="50"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248893 3014 flags.go:64] FLAG: --node-status-update-frequency="10s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248902 3014 flags.go:64] FLAG: --oom-score-adj="-999"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248912 3014 flags.go:64] FLAG: --pod-cidr=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248921 3014 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ce0319702e115e7248d135e58342ccf3f458e19c39e86dc8e79036f578ce80a4"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248937 3014 flags.go:64] FLAG: --pod-manifest-path=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248946 3014 flags.go:64] FLAG: --pod-max-pids="-1"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248956 3014 flags.go:64] FLAG: --pods-per-core="0"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248967 3014 flags.go:64] FLAG: --port="10250"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248977 3014 flags.go:64] FLAG: --protect-kernel-defaults="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248986 3014 flags.go:64] FLAG: --provider-id=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.248995 3014 flags.go:64] FLAG: --qos-reserved=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249005 3014 flags.go:64] FLAG: --read-only-port="10255"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249014 3014 flags.go:64] FLAG: --register-node="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249023 3014 flags.go:64] FLAG: --register-schedulable="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249032 3014 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249047 3014 flags.go:64] FLAG: --registry-burst="10"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249056 3014 flags.go:64] FLAG: --registry-qps="5"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249066 3014 flags.go:64] FLAG: --reserved-cpus=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249075 3014 flags.go:64] FLAG: --reserved-memory=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249086 3014 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249128 3014 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249139 3014 flags.go:64] FLAG: --rotate-certificates="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249148 3014 flags.go:64] FLAG: --rotate-server-certificates="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249158 3014 flags.go:64] FLAG: --runonce="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249166 3014 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249176 3014 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249186 3014 flags.go:64] FLAG: --seccomp-default="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249195 3014 flags.go:64] FLAG: --serialize-image-pulls="true"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249204 3014 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249214 3014 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249225 3014 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249235 3014 flags.go:64] FLAG: --storage-driver-password="root"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249246 3014 flags.go:64] FLAG: --storage-driver-secure="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249256 3014 flags.go:64] FLAG: --storage-driver-table="stats"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249265 3014 flags.go:64] FLAG: --storage-driver-user="root"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249276 3014 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249289 3014 flags.go:64] FLAG: --sync-frequency="1m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249301 3014 flags.go:64] FLAG: --system-cgroups=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249314 3014 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249366 3014 flags.go:64] FLAG: --system-reserved-cgroup=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249379 3014 flags.go:64] FLAG: --tls-cert-file=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249391 3014 flags.go:64] FLAG: --tls-cipher-suites="[]"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249407 3014 flags.go:64] FLAG: --tls-min-version=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249417 3014 flags.go:64] FLAG: --tls-private-key-file=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249427 3014 flags.go:64] FLAG: --topology-manager-policy="none"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249436 3014 flags.go:64] FLAG: --topology-manager-policy-options=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249446 3014 flags.go:64] FLAG: --topology-manager-scope="container"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249456 3014 flags.go:64] FLAG: --v="2"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249469 3014 flags.go:64] FLAG: --version="false"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249481 3014 flags.go:64] FLAG: --vmodule=""
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249491 3014 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.249501 3014 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249630 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249643 3014 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249656 3014 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249667 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249679 3014 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249691 3014 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249702 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249714 3014 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249724 3014 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249735 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249746 3014 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249756 3014 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249768 3014 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249779 3014 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249790 3014 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249801 3014 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249811 3014 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249822 3014 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249833 3014 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249844 3014 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249855 3014 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249865 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249876 3014 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249887 3014 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249898 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249909 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249920 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249930 3014 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249941 3014 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249952 3014 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249963 3014 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249974 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249984 3014 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.249995 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250006 3014 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250017 3014 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250027 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250038 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250049 3014 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250060 3014 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250072 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250083 3014 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250093 3014 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250104 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250115 3014 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250126 3014 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250138 3014 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250150 3014 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250161 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250172 3014 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250183 3014 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250194 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250204 3014 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250215 3014 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250226 3014 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250237 3014 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250249 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250261 3014 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250272 3014 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.250283 3014 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.250296 3014 feature_gate.go:250] feature gates: &{map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false ServiceAccountTokenNodeBindingValidation:false ServiceAccountTokenPodNodeInfo:false TranslateStreamCloseWebsocketRequests:false ValidatingAdmissionPolicy:false]}
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.264655 3014 server.go:487] "Kubelet version" kubeletVersion="v1.29.5+29c95f3"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.264721 3014 server.go:489] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264777 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264790 3014 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264797 3014 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264805 3014 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264814 3014 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264821 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264828 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264835 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264842 3014 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264849 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264856 3014 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264864 3014 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264871 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264878 3014 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264885 3014 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264892 3014 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264899 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264906 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264913 3014 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264920 3014 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264927 3014 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264935 3014 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264944 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264951 3014 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264958 3014 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264966 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264975 3014 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264984 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.264991 3014 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265000 3014 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265007 3014 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265014 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265022 3014 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265029 3014 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265039 3014 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265047 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265054 3014 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265061 3014 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265068 3014 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265076 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265083 3014 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265091 3014 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265098 3014 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265144 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265151 3014 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265159 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265166 3014 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265174 3014 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265181 3014 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265188 3014 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265196 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265203 3014 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265210 3014 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265220 3014 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265227 3014 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265235 3014 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265242 3014 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265248 3014 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265256 3014 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265263 3014 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.265271 3014 feature_gate.go:250] feature gates: &{map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false ServiceAccountTokenNodeBindingValidation:false ServiceAccountTokenPodNodeInfo:false TranslateStreamCloseWebsocketRequests:false ValidatingAdmissionPolicy:false]}
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265393 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265401 3014 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265408 3014 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265415 3014 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265421 3014 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265428 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265435 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265442 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265449 3014 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265457 3014 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265464 3014 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265471 3014 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265478 3014 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265484 3014 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265491 3014 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265498 3014 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265505 3014 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265513 3014 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265520 3014 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265527 3014 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265535 3014 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265543 3014 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265550 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265557 3014 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265564 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265571 3014 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265578 3014 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265585 3014 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265592 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265598 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265605 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265612 3014 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265619 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265625 3014 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265631 3014 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265638 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265645 3014 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265651 3014 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265658 3014 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265664 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265671 3014 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265677 3014 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265684 3014 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265690 3014 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265700 3014 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265707 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265714 3014 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265721 3014 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265728 3014 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265734 3014 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265741 3014 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265747 3014 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265754 3014 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265760 3014 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265769 3014 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265776 3014 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265783 3014 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265791 3014 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265799 3014 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.265806 3014 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.265814 3014 feature_gate.go:250] feature gates: &{map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false ServiceAccountTokenNodeBindingValidation:false ServiceAccountTokenPodNodeInfo:false TranslateStreamCloseWebsocketRequests:false ValidatingAdmissionPolicy:false]}
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.266168 3014 server.go:925] "Client rotation is on, will bootstrap in background"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.273650 3014 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.277426 3014 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.277686 3014 server.go:982] "Starting client certificate rotation"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.277703 3014 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.277943 3014 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-06-10 09:19:07.786349159 +0000 UTC
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.278048 3014 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1961h59m27.508304132s for next certificate rotation
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.309568 3014 dynamic_cafile_content.go:119] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.313772 3014 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.317089 3014 util_unix.go:103] "Using this endpoint is deprecated, please consider using full URL format" endpoint="/var/run/crio/crio.sock" URL="unix:///var/run/crio/crio.sock"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.361056 3014 remote_runtime.go:143] "Validated CRI v1 runtime API"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.361186 3014 util_unix.go:103] "Using this endpoint is deprecated, please consider using full URL format" endpoint="/var/run/crio/crio.sock" URL="unix:///var/run/crio/crio.sock"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.431824 3014 remote_image.go:111] "Validated CRI v1 image API"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.452922 3014 fs.go:132] Filesystem UUIDs: map[2026-03-20-15-18-37-00:/dev/sr0 68d6f3e9-64e9-44a4-a1d0-311f9c629a01:/dev/vda4 6ea7ef63-bc43-49c4-9337-b3b14ffb2763:/dev/vda3 7B77-95E7:/dev/vda2]
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.452966 3014 fs.go:133] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0}]
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.470747 3014 manager.go:217] Machine: 
{Timestamp:2026-03-20 15:19:40.467617644 +0000 UTC m=+0.908618016 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:c1bd596843fb445da20eca66471ddf66 SystemUUID:071e7dfc-e1e7-4107-84a5-e8044dfe14fc BootID:66090dc1-f212-4b3e-a31d-f5899cfbb0f4 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85294297088 Type:vfs Inodes:41680320 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:80:41:cc Speed:0 Mtu:1500} {Name:br-int MacAddress:4e:ec:11:72:80:3b Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:80:41:cc Speed:-1 Mtu:1500} {Name:eth10 MacAddress:b6:ea:15:f7:d4:30 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:b6:dc:d9:26:03:d4 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:5a:a5:1d:91:2d:66 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] 
SocketID:6} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.471945 3014 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.472020 3014 manager.go:233] Version: {KernelVersion:5.14.0-427.22.1.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 416.94.202406172220-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.476686 3014 container_manager_linux.go:268] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.476894 3014 container_manager_linux.go:273] "Creating Container Manager object based on Node Config" nodeConfig={"RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null} Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.476932 3014 topology_manager.go:138] "Creating topology manager with none policy" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.476943 3014 container_manager_linux.go:304] "Creating device plugin manager" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.477965 3014 manager.go:136] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.477996 3014 server.go:66] "Creating device plugin registration server" 
version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.479465 3014 state_mem.go:36] "Initialized new in-memory state store" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.479880 3014 server.go:1227] "Using root directory" path="/var/lib/kubelet" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.482742 3014 kubelet.go:406] "Attempting to sync node with API server" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.482764 3014 kubelet.go:311] "Adding static pod path" path="/etc/kubernetes/manifests" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.483391 3014 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.483413 3014 kubelet.go:322] "Adding apiserver pod source" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.483438 3014 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.487507 3014 kuberuntime_manager.go:258] "Container runtime initialized" containerRuntime="cri-o" version="1.29.5-5.rhaos4.16.git7032128.el9" apiVersion="v1" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.489614 3014 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.490818 3014 kubelet.go:826] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491109 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491496 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/rbd" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491576 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/azure-file" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491641 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491709 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491770 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491824 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491875 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/secret" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491930 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.491981 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/cephfs" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.492046 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.492098 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/fc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.492159 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.492223 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/projected" Mar 20 15:19:40 crc 
kubenswrapper[3014]: I0320 15:19:40.492274 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.493199 3014 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/csi" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.493782 3014 server.go:1262] "Started kubelet" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.494041 3014 server.go:162] "Starting to listen" address="0.0.0.0" port=10250 Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.495060 3014 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.495995 3014 server.go:233] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Mar 20 15:19:40 crc systemd[1]: Started Kubernetes Kubelet. Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497057 3014 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497092 3014 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497408 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.497304 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497566 3014 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-05-27 00:27:25.992452404 +0000 UTC Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.497609 3014 kubelet_node_status.go:512] "Error getting the current node from lister" err="node \"crc\" not found" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497639 3014 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1617h7m45.494820462s for next certificate rotation Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.497546 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497818 3014 volume_manager.go:289] "The desired_state_of_world populator starts" Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.497820 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.497833 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get 
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497834 3014 volume_manager.go:291] "Starting Kubelet Volume Manager" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.497885 3014 server.go:461] "Adding debug handlers to kubelet server" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.499928 3014 desired_state_of_world_populator.go:151] "Desired state populator starts to run" Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.501404 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="200ms" Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.502749 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.502929 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.503691 3014 factory.go:55] Registering systemd factory Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.503734 3014 factory.go:221] Registration of the systemd container factory successfully Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.510750 3014 factory.go:153] Registering CRI-O factory Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.510933 3014 factory.go:221] Registration of the crio container factory successfully Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.511051 3014 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.511124 3014 factory.go:103] Registering Raw factory Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.511185 3014 manager.go:1196] Started watching for new ooms in manager Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.511858 3014 manager.go:319] Starting recovery of all containers Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.526198 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC 
m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532370 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a23c0ee-5648-448c-b772-83dced2891ce" volumeName="kubernetes.io/projected/6a23c0ee-5648-448c-b772-83dced2891ce-kube-api-access-gsxd9" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532430 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532462 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="297ab9b6-2186-4d5b-a952-2bfd59af63c4" volumeName="kubernetes.io/projected/297ab9b6-2186-4d5b-a952-2bfd59af63c4-kube-api-access-vtgqn" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532495 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532519 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532546 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" volumeName="kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-binary-copy" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532571 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d0dcce3-d96e-48cb-9b9f-362105911589" volumeName="kubernetes.io/configmap/9d0dcce3-d96e-48cb-9b9f-362105911589-mcd-auth-proxy-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532613 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532642 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="297ab9b6-2186-4d5b-a952-2bfd59af63c4" volumeName="kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532667 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="71af81a9-7d43-49b2-9287-c375900aa905" volumeName="kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532724 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="71af81a9-7d43-49b2-9287-c375900aa905" volumeName="kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532748 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532778 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c085412c-b875-46c9-ae3e-e6b0d8067091" volumeName="kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532803 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532828 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532855 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="59748b9b-c309-4712-aa85-bb38d71c4915" volumeName="kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532878 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532930 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532953 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" volumeName="kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-sysctl-allowlist" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.532977 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533000 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" volumeName="kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533025 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" volumeName="kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533051 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="13045510-8717-4a71-ade4-be95a76440a7" volumeName="kubernetes.io/projected/13045510-8717-4a71-ade4-be95a76440a7-kube-api-access-dtjml" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533077 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="2b6d14a5-ca00-40c7-af7a-051a98a24eed" volumeName="kubernetes.io/projected/2b6d14a5-ca00-40c7-af7a-051a98a24eed-kube-api-access-j4qn7" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533104 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533129 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533154 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533177 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="8a5ae51d-d173-4531-8975-f164c975ce1f" volumeName="kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533200 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533223 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533278 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533303 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="12e733dd-0939-4f1b-9cbb-13897e093787" volumeName="kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533380 3014 reconstruct_new.go:135] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-env-overrides" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533405 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" volumeName="kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533428 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533452 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/projected/410cf605-1970-4691-9c95-53fdc123b1f3-kube-api-access-cx4f9" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533502 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" volumeName="kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533528 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533553 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-metrics-certs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533575 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533598 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0f394926-bdb9-425c-b36e-264d7fd34550" volumeName="kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533626 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533650 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="475321a1-8b7e-4033-8f72-b05a8b377347" volumeName="kubernetes.io/projected/475321a1-8b7e-4033-8f72-b05a8b377347-kube-api-access-c2f8t" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533676 3014 reconstruct_new.go:135] "Volume is marked 
as uncertain and added into the actual state" pod="" podName="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" volumeName="kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533698 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533725 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="887d596e-c519-4bfa-af90-3edd9e1b2f0f" volumeName="kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533748 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533805 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533837 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="13045510-8717-4a71-ade4-be95a76440a7" volumeName="kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533864 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533889 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="10603adc-d495-423c-9459-4caa405960bb" volumeName="kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533917 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4092a9f8-5acc-4932-9e90-ef962eeb301a" volumeName="kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533940 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="475321a1-8b7e-4033-8f72-b05a8b377347" volumeName="kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-multus-daemon-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533964 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.533988 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/projected/51a02bbf-2d40-4f84-868a-d399ea18a846-kube-api-access-zjg2w" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534011 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534034 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5d722a-1123-4935-9740-52a08d018bc9" volumeName="kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534058 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" volumeName="kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534084 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6268b7fe-8910-4505-b404-6f1df638105c" volumeName="kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534108 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534132 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d0f40333-c860-4c04-8058-a0bf572dcf12" volumeName="kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534158 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534182 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534206 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e" volumeName="kubernetes.io/projected/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-kube-api-access-d7jw8" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534230 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="2b6d14a5-ca00-40c7-af7a-051a98a24eed" volumeName="kubernetes.io/configmap/2b6d14a5-ca00-40c7-af7a-051a98a24eed-iptables-alerter-script" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534475 3014 reconstruct_new.go:135] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534499 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf1a8b70-3856-486f-9912-a2de1d57c3fb" volumeName="kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-node-bootstrap-token" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534522 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534544 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534567 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534595 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="cf1a8966-f594-490a-9fbb-eec5bafd13d3" volumeName="kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534620 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3f4dca86-e6ee-4ec9-8324-86aff960225e" volumeName="kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534643 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" volumeName="kubernetes.io/projected/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-kube-api-access-bwbqm" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534668 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c782cf62-a827-4677-b3c2-6f82c5f09cbb" volumeName="kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534692 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" volumeName="kubernetes.io/projected/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-kube-api-access-rkkfv" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534718 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534744 3014 reconstruct_new.go:135] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" volumeName="kubernetes.io/projected/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-kube-api-access-8svnk" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534777 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534802 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0f394926-bdb9-425c-b36e-264d7fd34550" volumeName="kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534826 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534850 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/secret/410cf605-1970-4691-9c95-53fdc123b1f3-ovn-control-plane-metrics-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534873 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534897 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ed024e5d-8fc2-4c22-803d-73f3c9795f19" volumeName="kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534920 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534943 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-default-certificate" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534967 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.534992 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c085412c-b875-46c9-ae3e-e6b0d8067091" volumeName="kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535016 3014 
reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535041 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="8a5ae51d-d173-4531-8975-f164c975ce1f" volumeName="kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535066 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535091 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535114 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-auth-proxy-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535141 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ed024e5d-8fc2-4c22-803d-73f3c9795f19" volumeName="kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535166 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-ovnkube-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535189 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="530553aa-0a1d-423e-8a22-f5eb4bdbb883" volumeName="kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535216 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/projected/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-kube-api-access-4qr9t" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535248 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535273 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" volumeName="kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 
15:19:40.535300 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535350 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-kube-api-access-9x6dp" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535379 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" volumeName="kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535408 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="34a48baf-1bee-4921-8bb2-9b7320e76f79" volumeName="kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535434 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="530553aa-0a1d-423e-8a22-f5eb4bdbb883" volumeName="kubernetes.io/empty-dir/530553aa-0a1d-423e-8a22-f5eb4bdbb883-available-featuregates" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535458 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-bound-sa-token" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535487 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535513 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535539 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535563 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535587 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls" seLinuxMountContext="" 
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535611 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535636 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" volumeName="kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535661 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535686 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c085412c-b875-46c9-ae3e-e6b0d8067091" volumeName="kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535709 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" volumeName="kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535734 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc291782-27d2-4a74-af79-c7dcb31535d2" volumeName="kubernetes.io/projected/cc291782-27d2-4a74-af79-c7dcb31535d2-kube-api-access-4sfhc" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535757 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" volumeName="kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535780 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535804 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535828 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6639609b-906b-4193-883e-ed1160aa5d50" volumeName="kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535852 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6d67253e-2acd-4bc1-8185-793587da4f17" volumeName="kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj" seLinuxMountContext="" Mar 20 15:19:40 crc 
kubenswrapper[3014]: I0320 15:19:40.535876 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="887d596e-c519-4bfa-af90-3edd9e1b2f0f" volumeName="kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535900 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="887d596e-c519-4bfa-af90-3edd9e1b2f0f" volumeName="kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535927 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535951 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d0dcce3-d96e-48cb-9b9f-362105911589" volumeName="kubernetes.io/secret/9d0dcce3-d96e-48cb-9b9f-362105911589-proxy-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535974 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9fb762d1-812f-43f1-9eac-68034c1ecec7" volumeName="kubernetes.io/projected/9fb762d1-812f-43f1-9eac-68034c1ecec7-kube-api-access" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.535998 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc291782-27d2-4a74-af79-c7dcb31535d2" volumeName="kubernetes.io/secret/cc291782-27d2-4a74-af79-c7dcb31535d2-metrics-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536023 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536047 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536127 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536208 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536314 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="297ab9b6-2186-4d5b-a952-2bfd59af63c4" volumeName="kubernetes.io/configmap/297ab9b6-2186-4d5b-a952-2bfd59af63c4-mcc-auth-proxy-config" seLinuxMountContext="" Mar 20 15:19:40 crc 
kubenswrapper[3014]: I0320 15:19:40.536366 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536393 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4092a9f8-5acc-4932-9e90-ef962eeb301a" volumeName="kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536419 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-ovnkube-identity-cm" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536443 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" volumeName="kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536468 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536496 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536533 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-auth-proxy-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536557 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="71af81a9-7d43-49b2-9287-c375900aa905" volumeName="kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536581 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536605 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0f394926-bdb9-425c-b36e-264d7fd34550" volumeName="kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536629 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3f4dca86-e6ee-4ec9-8324-86aff960225e" volumeName="kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt" 
seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536653 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="530553aa-0a1d-423e-8a22-f5eb4bdbb883" volumeName="kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536676 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" volumeName="kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536699 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/secret/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-machine-approver-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536723 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-env-overrides" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536751 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536775 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536798 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536821 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf1a8b70-3856-486f-9912-a2de1d57c3fb" volumeName="kubernetes.io/projected/bf1a8b70-3856-486f-9912-a2de1d57c3fb-kube-api-access-6z2n9" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536845 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3f4dca86-e6ee-4ec9-8324-86aff960225e" volumeName="kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536867 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="59748b9b-c309-4712-aa85-bb38d71c4915" volumeName="kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536891 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig" 
seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536914 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536937 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" volumeName="kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536963 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.536987 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.537010 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.537035 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540428 3014 reconstruct_new.go:149] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ea5f9a7192af1960ec8c50a86fd2d9a756dbf85695798868f611e04a03ec009/globalmount" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540487 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/projected/120b38dc-8236-4fa6-a452-642b8ad738ee-kube-api-access-bwvjb" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540529 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-stats-auth" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540555 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" volumeName="kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540581 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6d67253e-2acd-4bc1-8185-793587da4f17" volumeName="kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540607 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9fb762d1-812f-43f1-9eac-68034c1ecec7" volumeName="kubernetes.io/secret/9fb762d1-812f-43f1-9eac-68034c1ecec7-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540632 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540661 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6d67253e-2acd-4bc1-8185-793587da4f17" volumeName="kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540687 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" volumeName="kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540716 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/empty-dir/bd556935-a077-45df-ba3f-d42c39326ccd-tmpfs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540741 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c782cf62-a827-4677-b3c2-6f82c5f09cbb" volumeName="kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540768 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-bound-sa-token" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540793 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" volumeName="kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540822 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="13045510-8717-4a71-ade4-be95a76440a7" volumeName="kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540847 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" volumeName="kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540872 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" volumeName="kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540900 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/configmap/aa90b3c2-febd-4588-a063-7fbbe82f00c1-service-ca-bundle" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540925 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="10603adc-d495-423c-9459-4caa405960bb" volumeName="kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540952 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.540977 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541002 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c782cf62-a827-4677-b3c2-6f82c5f09cbb" volumeName="kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541031 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541059 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541086 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4092a9f8-5acc-4932-9e90-ef962eeb301a" volumeName="kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541112 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541140 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e" volumeName="kubernetes.io/configmap/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-serviceca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541175 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="9fb762d1-812f-43f1-9eac-68034c1ecec7" volumeName="kubernetes.io/configmap/9fb762d1-812f-43f1-9eac-68034c1ecec7-service-ca" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541200 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" volumeName="kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541225 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541289 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541318 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541372 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="475321a1-8b7e-4033-8f72-b05a8b377347" volumeName="kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-cni-binary-copy" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541398 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" volumeName="kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541422 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/projected/aa90b3c2-febd-4588-a063-7fbbe82f00c1-kube-api-access-v45vm" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541456 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541484 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541512 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" volumeName="kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541539 3014 
reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="8a5ae51d-d173-4531-8975-f164c975ce1f" volumeName="kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541601 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d0dcce3-d96e-48cb-9b9f-362105911589" volumeName="kubernetes.io/projected/9d0dcce3-d96e-48cb-9b9f-362105911589-kube-api-access-xkzjk" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541628 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541715 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6639609b-906b-4193-883e-ed1160aa5d50" volumeName="kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541752 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf1a8b70-3856-486f-9912-a2de1d57c3fb" volumeName="kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-certs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541795 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/secret/51a02bbf-2d40-4f84-868a-d399ea18a846-webhook-cert" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541845 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ed024e5d-8fc2-4c22-803d-73f3c9795f19" volumeName="kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541895 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6639609b-906b-4193-883e-ed1160aa5d50" volumeName="kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541936 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541970 3014 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" volumeName="kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config" seLinuxMountContext="" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.541998 3014 reconstruct_new.go:102] "Volume reconstruction finished" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.542023 3014 reconciler_new.go:29] "Reconciler: start to sync state" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.544928 3014 manager.go:324] Recovery completed Mar 20 15:19:40 crc kubenswrapper[3014]: 
I0320 15:19:40.566020 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.569543 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.569590 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.569603 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.570847 3014 cpu_manager.go:215] "Starting CPU manager" policy="none" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.570864 3014 cpu_manager.go:216] "Reconciling" reconcilePeriod="10s" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.570883 3014 state_mem.go:36] "Initialized new in-memory state store" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.592199 3014 policy_none.go:49] "None policy: Start" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.593248 3014 memory_manager.go:170] "Starting memorymanager" policy="None" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.593304 3014 state_mem.go:35] "Initializing new in-memory state store" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.598017 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.599673 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.599705 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.599717 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.599745 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.601249 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.662266 3014 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.663423 3014 manager.go:296] "Starting Device Plugin manager" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.663705 3014 manager.go:479] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.663718 3014 server.go:79] "Starting device plugin registration server" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.664427 3014 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.664474 3014 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.664498 3014 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.664507 3014 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.665431 3014 status_manager.go:217] "Starting to sync pod status with apiserver" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.665716 3014 kubelet.go:2343] "Starting kubelet main sync loop" Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.665885 3014 kubelet.go:2367] "Skipping pod synchronization" err="PLEG is not healthy: pleg has yet to be successful" Mar 20 15:19:40 crc kubenswrapper[3014]: W0320 15:19:40.667846 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.667925 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.703497 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="400ms" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.766350 3014 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.766456 3014 topology_manager.go:215] "Topology Admit Handler" podUID="d3ae206906481b4831fd849b559269c8" podNamespace="openshift-machine-config-operator" podName="kube-rbac-proxy-crio-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.766568 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.767847 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.767896 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.767914 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.768039 3014 topology_manager.go:215] "Topology Admit Handler" podUID="b2a6a3b2ca08062d24afa4c01aaf9e4f" podNamespace="openshift-etcd" podName="etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.768097 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.768234 3014 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.768290 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.769406 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.769469 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.769484 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.770655 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.770692 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.770709 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.770844 3014 topology_manager.go:215] "Topology Admit Handler" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" podNamespace="openshift-kube-apiserver" podName="kube-apiserver-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.770897 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.771395 3014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.771440 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772459 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772511 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772529 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772661 3014 topology_manager.go:215] "Topology Admit Handler" podUID="4faaac70bf21c7d77dcb526af466bffa" podNamespace="openshift-kube-controller-manager" podName="kube-controller-manager-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772711 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772873 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772910 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.772928 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773198 3014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773245 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773431 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773481 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773503 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773736 3014 topology_manager.go:215] "Topology Admit Handler" podUID="6a57a7fb1944b43a6bd11a349520d301" podNamespace="openshift-kube-scheduler" podName="openshift-kube-scheduler-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773810 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773866 3014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.773908 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.774355 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.774390 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.774406 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.774863 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.774902 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.774923 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.775088 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.775131 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.775147 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.775378 3014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.775426 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.776343 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.776388 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.776418 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.782901 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.801660 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.802816 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.802843 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.802853 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.802876 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:19:40 crc kubenswrapper[3014]: E0320 15:19:40.804124 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.851787 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-log-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.851825 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.851998 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852115 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-resource-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " 
pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852220 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-data-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852258 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852285 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-usr-local-bin\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852352 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852388 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852431 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852473 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852523 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-static-pod-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852575 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-cert-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 
15:19:40.852610 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.852657 3014 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954186 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-data-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954246 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-resource-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954275 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-usr-local-bin\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954301 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954356 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954382 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954413 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954441 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-static-pod-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954473 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-cert-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954505 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954541 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954609 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954646 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954680 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.954711 3014 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-log-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955504 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-data-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955541 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-usr-local-bin\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955569 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955672 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955606 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-resource-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955687 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955695 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-cert-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.955985 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.956104 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.956162 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.956475 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-log-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.956572 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.956603 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-static-pod-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.957296 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Mar 20 15:19:40 crc kubenswrapper[3014]: I0320 15:19:40.960623 3014 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.106193 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="800ms"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.116578 3014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.137115 3014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.153233 3014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3ae206906481b4831fd849b559269c8.slice/crio-3a8892bca91ed899eef810ec4a11f159ea06330fac5c45b32bc35b6660c1e5d1 WatchSource:0}: Error finding container 3a8892bca91ed899eef810ec4a11f159ea06330fac5c45b32bc35b6660c1e5d1: Status 404 returned error can't find the container with id 3a8892bca91ed899eef810ec4a11f159ea06330fac5c45b32bc35b6660c1e5d1
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.154206 3014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2a6a3b2ca08062d24afa4c01aaf9e4f.slice/crio-fc6276e2dbf92c74575ed8610796496c978a1f1fb425c725aa10cc307068febf WatchSource:0}: Error finding container fc6276e2dbf92c74575ed8610796496c978a1f1fb425c725aa10cc307068febf: Status 404 returned error can't find the container with id fc6276e2dbf92c74575ed8610796496c978a1f1fb425c725aa10cc307068febf
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.160044 3014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.188998 3014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.197518 3014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
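The volume records above pair up: every "MountVolume started" from reconciler_common.go:231 is answered by a "MountVolume.SetUp succeeded" from operation_generator.go:721 for the same UniqueName, across all five static pods. A minimal Go sketch for checking that pairing when auditing a log like this one (the program, its regular expressions, and reading the log on stdin are assumptions for illustration, not kubelet code):

package main

// Hypothetical log-audit helper: pair "MountVolume started" records with the
// matching "MountVolume.SetUp succeeded" records by UniqueName, reading the
// reflowed log (one record per line) on stdin.

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	started   = regexp.MustCompile(`MountVolume started for volume .* \(UniqueName: \\"([^\\]+)\\"`)
	succeeded = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume .* \(UniqueName: \\"([^\\]+)\\"`)
)

func main() {
	pending := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // records can be long
	for sc.Scan() {
		if m := started.FindStringSubmatch(sc.Text()); m != nil {
			pending[m[1]] = true
		}
		if m := succeeded.FindStringSubmatch(sc.Text()); m != nil {
			delete(pending, m[1])
		}
	}
	for name := range pending {
		fmt.Println("mount started but no SetUp success:", name)
	}
}

Run against this section it prints nothing: every started mount succeeds within a few milliseconds.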
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.202193 3014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4faaac70bf21c7d77dcb526af466bffa.slice/crio-12fdb397341e32e564dadf6746e073ff52d00e2e3280b576ef811edb898dec3f WatchSource:0}: Error finding container 12fdb397341e32e564dadf6746e073ff52d00e2e3280b576ef811edb898dec3f: Status 404 returned error can't find the container with id 12fdb397341e32e564dadf6746e073ff52d00e2e3280b576ef811edb898dec3f
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.205025 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.208676 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.208720 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.208729 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.208756 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.210831 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.217647 3014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a57a7fb1944b43a6bd11a349520d301.slice/crio-df958464b5b5ee076d634547d1e51e59ccbe3955c241e11a08233bbac58c1477 WatchSource:0}: Error finding container df958464b5b5ee076d634547d1e51e59ccbe3955c241e11a08233bbac58c1477: Status 404 returned error can't find the container with id df958464b5b5ee076d634547d1e51e59ccbe3955c241e11a08233bbac58c1477
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.417309 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.417408 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.499820 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.543615 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
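Every API call from here on fails the same way: "dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host". Name resolution of the internal API endpoint fails at the configured resolver, so node registration, lease renewal, and every reflector list/watch die before a TCP connection is even attempted. A quick check of that resolution (hostname taken from the log; the program is an assumed diagnostic, and on a healthy CRC host the name typically resolves via a local dnsmasq entry for *.crc.testing):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Reproduce the lookup the kubelet's API client keeps failing on.
	addrs, err := net.LookupHost("api-int.crc.testing")
	if err != nil {
		fmt.Println("lookup failed:", err) // matches the "no such host" in the log
		return
	}
	fmt.Println("resolved:", addrs)
}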
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.543723 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.598038 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.598127 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.671660 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"fc6276e2dbf92c74575ed8610796496c978a1f1fb425c725aa10cc307068febf"}
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.673056 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d3ae206906481b4831fd849b559269c8","Type":"ContainerStarted","Data":"3a8892bca91ed899eef810ec4a11f159ea06330fac5c45b32bc35b6660c1e5d1"}
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.676885 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"df958464b5b5ee076d634547d1e51e59ccbe3955c241e11a08233bbac58c1477"}
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.679006 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"12fdb397341e32e564dadf6746e073ff52d00e2e3280b576ef811edb898dec3f"}
Mar 20 15:19:41 crc kubenswrapper[3014]: I0320 15:19:41.681221 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"9bb9c0c5911adbb22d1fba514a92ce2c6c598ec0034d171d89c98ff953c016e9"}
Mar 20 15:19:41 crc kubenswrapper[3014]: W0320 15:19:41.747779 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.747885 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:41 crc kubenswrapper[3014]: E0320 15:19:41.908242 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="1.6s"
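The "SyncLoop (PLEG)" records carry a structured event: pod, pod UID, event type, and a container or sandbox ID. The first ContainerStarted per pod above is the sandbox whose creation produced the manager.go:1169 watch warnings earlier. A sketch that extracts those fields (the regular expression and stdin input are assumptions, not kubelet code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Pull pod name, event type, and container ID out of each PLEG record.
var pleg = regexp.MustCompile(`event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		if m := pleg.FindStringSubmatch(sc.Text()); m != nil {
			// event type, pod, first 12 chars of the container/sandbox ID
			fmt.Printf("%-16s %-60s %.12s\n", m[3], m[1], m[4])
		}
	}
}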
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.011858 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.013257 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.013295 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.013307 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.013361 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:19:42 crc kubenswrapper[3014]: E0320 15:19:42.014959 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.499252 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.686008 3014 generic.go:334] "Generic (PLEG): container finished" podID="6a57a7fb1944b43a6bd11a349520d301" containerID="f98f0fb593ccd9f867cbdee35b7e650167c840d2b9dfc561a209fbcd9ea91ae4" exitCode=0
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.686139 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerDied","Data":"f98f0fb593ccd9f867cbdee35b7e650167c840d2b9dfc561a209fbcd9ea91ae4"}
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.686215 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.687749 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.687810 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.687838 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.693638 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11"}
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.693687 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"3aaa717a20184ba7c678f2d92cd90adfa09d7a1e12805df8611ac4c25ee8dd45"}
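The lease controller's retry interval is growing across these records: 800ms at 15:19:41.1, 1.6s at 15:19:41.9, and below it reaches 3.2s, 6.4s, and finally 7s, i.e. it doubles per failure with an apparent 7s cap. A sketch of that schedule as read off the log (the cap is inferred from these records, not taken from kubelet source):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Retry intervals observed in the controller.go:145 records above/below.
	interval := 800 * time.Millisecond
	const maxInterval = 7 * time.Second // inferred cap; the log never exceeds 7s
	for i := 0; i < 5; i++ {
		fmt.Println(interval) // 800ms, 1.6s, 3.2s, 6.4s, 7s
		interval *= 2
		if interval > maxInterval {
			interval = maxInterval
		}
	}
}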
event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"3aaa717a20184ba7c678f2d92cd90adfa09d7a1e12805df8611ac4c25ee8dd45"} Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.696722 3014 generic.go:334] "Generic (PLEG): container finished" podID="a3f6a3e226d5c60ea73cb7fac85e9195" containerID="441dfd6c209dc44a52fad942b84fca1b39d64bc5d4b6f097e46fbc012dd75ebc" exitCode=0 Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.696786 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerDied","Data":"441dfd6c209dc44a52fad942b84fca1b39d64bc5d4b6f097e46fbc012dd75ebc"} Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.696866 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.698151 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.698219 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.698241 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.699471 3014 generic.go:334] "Generic (PLEG): container finished" podID="b2a6a3b2ca08062d24afa4c01aaf9e4f" containerID="1453dd0b8a8778b0785c93f5e2df1a064a6816feb78e4f5e3a541e260d0b64e0" exitCode=0 Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.699629 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerDied","Data":"1453dd0b8a8778b0785c93f5e2df1a064a6816feb78e4f5e3a541e260d0b64e0"} Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.699637 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.700740 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.701126 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.701224 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.701307 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.702677 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.702702 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.702713 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.703041 3014 generic.go:334] "Generic (PLEG): container finished" podID="d3ae206906481b4831fd849b559269c8" containerID="e0b214fe44763fd2ebb8e12552c2eb79c9248c388c5c30a3919fd4493e615c9b" exitCode=0 
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.703146 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.703199 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d3ae206906481b4831fd849b559269c8","Type":"ContainerDied","Data":"e0b214fe44763fd2ebb8e12552c2eb79c9248c388c5c30a3919fd4493e615c9b"}
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.704225 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.704260 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:42 crc kubenswrapper[3014]: I0320 15:19:42.704279 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:43 crc kubenswrapper[3014]: W0320 15:19:43.156295 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:43 crc kubenswrapper[3014]: E0320 15:19:43.156440 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.500559 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:43 crc kubenswrapper[3014]: E0320 15:19:43.513410 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="3.2s"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.615727 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.616811 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.616842 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.616854 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.616878 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:19:43 crc kubenswrapper[3014]: E0320 15:19:43.617924 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.705934 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d3ae206906481b4831fd849b559269c8","Type":"ContainerStarted","Data":"2047924a522d63a3ba3d916524885c69e651250f88c9fd436961fcbcd67e982d"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.706258 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.707124 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.707230 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.707302 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.713671 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"8559735f97f15f2b3c1ed4e54e8005f0256c7e127ce88637d6fb5319fa6db925"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.713806 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"40321b683bf0b5bcf8caaa9bd0ad5a7307bf86a6e72763ef0bfb438eb247ff05"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.713869 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"caeb5dbf961ea501f35c60e7de30859b6296a3ceb00ef88b05c7a61a2898d3e0"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.713878 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.714777 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.714836 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.714849 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.716499 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"3fda780eb537645614085cc6165e9c6b095a6da1f88777f2ed1491cd9ad6ddfa"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.716524 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"7df91f8788913b8796b60466191f221ec776c445d78c50d548691d24a4120c1a"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.716646 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.717563 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.717586 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.717598 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.719312 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"0b034dcb39057ef0a45d1d42ce5494d922092951227f6fba688273d36fbc2a4a"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.719420 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"f8e793dfc90ffd7927443e4e730985353106506c7b315dc6bcdf59a724d4c3b3"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.719509 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"f777805e2bf5eb31d37ed7385eeba522008d0b1cb5ae7b37b058914758ecee08"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.721209 3014 generic.go:334] "Generic (PLEG): container finished" podID="b2a6a3b2ca08062d24afa4c01aaf9e4f" containerID="860df4b24c551cd54cf2a4cc5250b72d7fa6ce71a821305934f7fab69781f1a5" exitCode=0
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.721295 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerDied","Data":"860df4b24c551cd54cf2a4cc5250b72d7fa6ce71a821305934f7fab69781f1a5"}
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.721298 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.721937 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.721959 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:43 crc kubenswrapper[3014]: I0320 15:19:43.721968 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:44 crc kubenswrapper[3014]: W0320 15:19:44.012827 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: E0320 15:19:44.012887 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: W0320 15:19:44.331405 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: E0320 15:19:44.331484 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: W0320 15:19:44.444607 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: E0320 15:19:44.445004 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.499599 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.727080 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"9be0b348028c6764796fdf1316f79b7560b3f85430bfc7da213462db2c536630"}
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.727132 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"61aa1ebb4c39f862c61dbac762b57eafec50aaa058750afcc5540b400699f30e"}
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.727204 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.728475 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.728562 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.728577 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730380 3014 generic.go:334] "Generic (PLEG): container finished" podID="b2a6a3b2ca08062d24afa4c01aaf9e4f" containerID="cdc21f5b6342bfb12aee24377fabbe4f62a5b24eb0a8e173861bd5d99f33b3e4" exitCode=0
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730482 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730507 3014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730552 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730577 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerDied","Data":"cdc21f5b6342bfb12aee24377fabbe4f62a5b24eb0a8e173861bd5d99f33b3e4"}
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730661 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.730773 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.731927 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.731958 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.731968 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.732666 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.732697 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.732713 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.732718 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.732776 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.732805 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.733042 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.733161 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:44 crc kubenswrapper[3014]: I0320 15:19:44.733271 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.347499 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.499584 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.738963 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"a35828f8826fc17ad6487ee26f2b5caff7ced7e2750aabb9aafab1e6e3f10bd1"}
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.738982 3014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.739006 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"f8464aa8254d191065c56bf7f99b02c8a0e4d6702969c278ab75ca85cff5fd64"}
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.739019 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"8cc9abb3b27829024f9e25911853304ae07b800b67ff7ed4e5d774314e38f0ec"}
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.739030 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.739053 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.740664 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.740694 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.740665 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.740705 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.740719 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:45 crc kubenswrapper[3014]: I0320 15:19:45.740730 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.393619 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.393748 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.393957 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.394193 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.395348 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.395381 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.395394 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.406222 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.499273 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:46 crc kubenswrapper[3014]: W0320 15:19:46.637524 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:46 crc kubenswrapper[3014]: E0320 15:19:46.637633 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:46 crc kubenswrapper[3014]: E0320 15:19:46.716079 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="6.4s"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.745996 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"be90fd914b7e1f3c6193351365aef7364b1d8162d708b41c918e60565fdbec21"}
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.746008 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.746025 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.747267 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.747302 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.747319 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.747491 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.747517 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.747532 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.818451 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.819827 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.819871 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.819887 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:46 crc kubenswrapper[3014]: I0320 15:19:46.819916 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:19:46 crc kubenswrapper[3014]: E0320 15:19:46.821499 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.500060 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.608578 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.608720 3014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.608755 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.609841 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.609927 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.609946 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:47 crc kubenswrapper[3014]: W0320 15:19:47.696558 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:47 crc kubenswrapper[3014]: E0320 15:19:47.696664 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.748052 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.748192 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.749660 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.749710 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.749732 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.749868 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.749933 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:47 crc kubenswrapper[3014]: I0320 15:19:47.749958 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:48 crc kubenswrapper[3014]: W0320 15:19:48.287360 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:48 crc kubenswrapper[3014]: E0320 15:19:48.287657 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.500165 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.722391 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.722637 3014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.722707 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.724705 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.725260 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:48 crc kubenswrapper[3014]: I0320 15:19:48.725283 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:49 crc kubenswrapper[3014]: E0320 15:19:49.004933 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:19:49 crc kubenswrapper[3014]: W0320 15:19:49.168174 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:49 crc kubenswrapper[3014]: E0320 15:19:49.168276 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.343898 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.344270 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.346233 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.346300 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.346401 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.437135 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.437561 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.439303 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.439467 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.439519 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:49 crc kubenswrapper[3014]: I0320 15:19:49.500000 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:50 crc kubenswrapper[3014]: I0320 15:19:50.499972 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:50 crc kubenswrapper[3014]: E0320 15:19:50.783686 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:19:51 crc kubenswrapper[3014]: I0320 15:19:51.499413 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:52 crc kubenswrapper[3014]: I0320 15:19:52.499070 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
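csi_plugin.go:880 logs once per second (15:19:49 through 15:19:52 here) while the kubelet waits to publish its CSINode object, which suggests a poll-until-reachable loop of roughly this shape (a generic sketch of the pattern, not kubelet's actual implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForAPI polls check once per second until it succeeds or timeout passes,
// logging each failure -- the cadence suggested by the csi_plugin.go:880 lines.
func waitForAPI(check func() error, timeout time.Duration) error {
	deadline := time.After(timeout)
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		err := check()
		if err == nil {
			return nil
		}
		fmt.Println("still waiting:", err)
		select {
		case <-tick.C:
		case <-deadline:
			return errors.New("timed out waiting for CSINode publishing")
		}
	}
}

func main() {
	// Simulated check that always fails, mirroring the DNS error in the log.
	err := waitForAPI(func() error { return errors.New("no such host") }, 3*time.Second)
	fmt.Println(err)
}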
Mar 20 15:19:53 crc kubenswrapper[3014]: E0320 15:19:53.117713 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.222062 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.224230 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.224341 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.224367 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.224409 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:19:53 crc kubenswrapper[3014]: E0320 15:19:53.225857 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.499717 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.499727 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.500493 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.501688 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.501748 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.501773 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.505846 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.768218 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.769266 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.769355 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:53 crc kubenswrapper[3014]: I0320 15:19:53.769381 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:54 crc kubenswrapper[3014]: I0320 15:19:54.499533 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.500296 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.570934 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.571120 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.572206 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.572286 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.572318 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:55 crc kubenswrapper[3014]: W0320 15:19:55.626820 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:55 crc kubenswrapper[3014]: E0320 15:19:55.626925 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.630575 3014 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.630756 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.639810 3014 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.639896 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.667016 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.776293 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_a3f6a3e226d5c60ea73cb7fac85e9195/kube-apiserver-check-endpoints/1.log" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.778691 3014 generic.go:334] "Generic (PLEG): container finished" podID="a3f6a3e226d5c60ea73cb7fac85e9195" containerID="9be0b348028c6764796fdf1316f79b7560b3f85430bfc7da213462db2c536630" exitCode=255 Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.778818 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.778811 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerDied","Data":"9be0b348028c6764796fdf1316f79b7560b3f85430bfc7da213462db2c536630"} Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.779137 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.779743 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.779803 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.779816 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.780503 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.780532 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.780543 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.781667 3014 scope.go:117] "RemoveContainer" containerID="9be0b348028c6764796fdf1316f79b7560b3f85430bfc7da213462db2c536630" Mar 20 15:19:55 crc kubenswrapper[3014]: I0320 15:19:55.812702 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Mar 20 15:19:55 crc kubenswrapper[3014]: W0320 15:19:55.823747 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:55 crc kubenswrapper[3014]: E0320 15:19:55.823820 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.499103 3014 csi_plugin.go:880] 
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.500141 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.500210 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.784109 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_a3f6a3e226d5c60ea73cb7fac85e9195/kube-apiserver-check-endpoints/1.log"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.785542 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"16f4cdbbbac26791f83d90f300a9d6945f8fa6a3b83d2c72b36112dd5c36b958"}
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.785635 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.785637 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.786396 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.786421 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.786430 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.787522 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.787571 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:56 crc kubenswrapper[3014]: I0320 15:19:56.787582 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:57 crc kubenswrapper[3014]: I0320 15:19:57.498895 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:57 crc kubenswrapper[3014]: W0320 15:19:57.544653 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:57 crc kubenswrapper[3014]: E0320 15:19:57.544717 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.500133 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.729582 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.729822 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.730097 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.731497 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.731564 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.731576 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.741266 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.791313 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.792779 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.792857 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:58 crc kubenswrapper[3014]: I0320 15:19:58.792875 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:59 crc kubenswrapper[3014]: E0320 15:19:59.007392 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:19:59 crc kubenswrapper[3014]: I0320 15:19:59.500017 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:59 crc kubenswrapper[3014]: I0320 15:19:59.794468 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:19:59 crc kubenswrapper[3014]: I0320 15:19:59.795916 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:19:59 crc kubenswrapper[3014]: I0320 15:19:59.795974 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:19:59 crc kubenswrapper[3014]: I0320 15:19:59.795987 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:19:59 crc kubenswrapper[3014]: W0320 15:19:59.889609 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:19:59 crc kubenswrapper[3014]: E0320 15:19:59.889749 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:00 crc kubenswrapper[3014]: E0320 15:20:00.120388 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:20:00 crc kubenswrapper[3014]: I0320 15:20:00.226925 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:00 crc kubenswrapper[3014]: I0320 15:20:00.228558 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:00 crc kubenswrapper[3014]: I0320 15:20:00.228626 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:00 crc kubenswrapper[3014]: I0320 15:20:00.228643 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:00 crc kubenswrapper[3014]: I0320 15:20:00.228682 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:20:00 crc kubenswrapper[3014]: E0320 15:20:00.230707 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:20:00 crc kubenswrapper[3014]: I0320 15:20:00.499117 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:00 crc kubenswrapper[3014]: E0320 15:20:00.784804 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:20:01 crc kubenswrapper[3014]: I0320 15:20:01.498984 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:02 crc kubenswrapper[3014]: I0320 15:20:02.499401 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:03 crc kubenswrapper[3014]: I0320 15:20:03.500408 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:04 crc kubenswrapper[3014]: I0320 15:20:04.499745 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:05 crc kubenswrapper[3014]: I0320 15:20:05.499441 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:06 crc kubenswrapper[3014]: I0320 15:20:06.499400 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:06 crc kubenswrapper[3014]: I0320 15:20:06.499874 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:20:06 crc kubenswrapper[3014]: I0320 15:20:06.500525 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:20:07 crc kubenswrapper[3014]: E0320 15:20:07.123130 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:20:07 crc kubenswrapper[3014]: I0320 15:20:07.231937 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:07 crc kubenswrapper[3014]: I0320 15:20:07.234078 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:07 crc kubenswrapper[3014]: I0320 15:20:07.234294 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:07 crc kubenswrapper[3014]: I0320 15:20:07.234489 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:07 crc kubenswrapper[3014]: I0320 15:20:07.234667 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:20:07 crc kubenswrapper[3014]: E0320 15:20:07.236563 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:20:07 crc kubenswrapper[3014]: I0320 15:20:07.499268 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:08 crc kubenswrapper[3014]: I0320 15:20:08.499541 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:09 crc kubenswrapper[3014]: E0320 15:20:09.010068 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:20:09 crc kubenswrapper[3014]: I0320 15:20:09.447010 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:20:09 crc kubenswrapper[3014]: I0320 15:20:09.447315 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:09 crc kubenswrapper[3014]: I0320 15:20:09.449102 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:09 crc kubenswrapper[3014]: I0320 15:20:09.449166 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:09 crc kubenswrapper[3014]: I0320 15:20:09.449186 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:09 crc kubenswrapper[3014]: I0320 15:20:09.499664 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:10 crc kubenswrapper[3014]: I0320 15:20:10.498956 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:10 crc kubenswrapper[3014]: E0320 15:20:10.785968 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:20:11 crc kubenswrapper[3014]: I0320 15:20:11.500288 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:12 crc kubenswrapper[3014]: I0320 15:20:12.500406 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.499046 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.954542 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:58508->192.168.126.11:10357: read: connection reset by peer" start-of-body=
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.954656 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:58508->192.168.126.11:10357: read: connection reset by peer"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.954716 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.954890 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.956552 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.956788 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.957150 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.961010 3014 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted"
Mar 20 15:20:13 crc kubenswrapper[3014]: I0320 15:20:13.961664 3014 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11" gracePeriod=30
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11" gracePeriod=30 Mar 20 15:20:14 crc kubenswrapper[3014]: E0320 15:20:14.124750 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.237105 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.238358 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.238386 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.238398 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.238423 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:20:14 crc kubenswrapper[3014]: E0320 15:20:14.239929 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.499978 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.842448 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/1.log" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.843018 3014 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11" exitCode=255 Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.843073 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11"} Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.843104 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"3a24a46e04162d08ebee39ec0db0c7bd7935642af1258d5ad545d9ea1e64e63b"} Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.843216 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.845091 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:14 crc kubenswrapper[3014]: 
I0320 15:20:14.845128 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:14 crc kubenswrapper[3014]: I0320 15:20:14.845144 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:15 crc kubenswrapper[3014]: W0320 15:20:15.239705 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:15 crc kubenswrapper[3014]: E0320 15:20:15.239824 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:15 crc kubenswrapper[3014]: I0320 15:20:15.347908 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:20:15 crc kubenswrapper[3014]: I0320 15:20:15.499614 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:15 crc kubenswrapper[3014]: I0320 15:20:15.846092 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:15 crc kubenswrapper[3014]: I0320 15:20:15.847949 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:15 crc kubenswrapper[3014]: I0320 15:20:15.848099 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:15 crc kubenswrapper[3014]: I0320 15:20:15.848194 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:16 crc kubenswrapper[3014]: I0320 15:20:16.499391 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:16 crc kubenswrapper[3014]: W0320 15:20:16.597438 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:16 crc kubenswrapper[3014]: E0320 15:20:16.597509 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:17 crc kubenswrapper[3014]: I0320 15:20:17.500284 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:18 crc 
kubenswrapper[3014]: W0320 15:20:18.270377 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:18 crc kubenswrapper[3014]: E0320 15:20:18.271390 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:18 crc kubenswrapper[3014]: I0320 15:20:18.500660 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:19 crc kubenswrapper[3014]: E0320 15:20:19.013163 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:20:19 crc kubenswrapper[3014]: W0320 15:20:19.377140 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:19 crc kubenswrapper[3014]: E0320 15:20:19.377248 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:19 crc kubenswrapper[3014]: I0320 15:20:19.499459 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:20 crc kubenswrapper[3014]: I0320 15:20:20.499170 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:20 crc kubenswrapper[3014]: E0320 15:20:20.786915 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:20:21 crc kubenswrapper[3014]: E0320 15:20:21.127284 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:20:21 crc kubenswrapper[3014]: I0320 15:20:21.240155 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:21 crc kubenswrapper[3014]: I0320 15:20:21.241498 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:21 crc kubenswrapper[3014]: I0320 15:20:21.241532 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:21 crc kubenswrapper[3014]: I0320 15:20:21.241541 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:21 crc kubenswrapper[3014]: I0320 15:20:21.241563 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:20:21 crc kubenswrapper[3014]: E0320 15:20:21.243092 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:20:21 crc kubenswrapper[3014]: I0320 15:20:21.500095 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:22 crc kubenswrapper[3014]: I0320 15:20:22.499686 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:23 crc kubenswrapper[3014]: I0320 15:20:23.499645 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:20:23 crc kubenswrapper[3014]: I0320 15:20:23.499705 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:23 crc kubenswrapper[3014]: I0320 15:20:23.500856 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:23 crc kubenswrapper[3014]: I0320 15:20:23.502356 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:23 crc kubenswrapper[3014]: I0320 15:20:23.502568 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:23 crc kubenswrapper[3014]: I0320 15:20:23.502719 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:24 crc kubenswrapper[3014]: I0320 15:20:24.499224 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:25 crc kubenswrapper[3014]: I0320 15:20:25.498705 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:26 crc kubenswrapper[3014]: I0320 15:20:26.499763 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:20:26 crc kubenswrapper[3014]: I0320 15:20:26.500316 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:20:26 crc kubenswrapper[3014]: I0320 15:20:26.500765 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:27 crc kubenswrapper[3014]: I0320 15:20:27.499295 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:28 crc kubenswrapper[3014]: E0320 15:20:28.129499 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:20:28 crc kubenswrapper[3014]: I0320 15:20:28.243638 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:28 crc kubenswrapper[3014]: I0320 15:20:28.245497 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:28 crc kubenswrapper[3014]: I0320 15:20:28.245567 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:28 crc kubenswrapper[3014]: I0320 15:20:28.245591 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:28 crc kubenswrapper[3014]: I0320 15:20:28.245633 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:20:28 crc kubenswrapper[3014]: E0320 15:20:28.247389 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:20:28 crc kubenswrapper[3014]: I0320 15:20:28.499893 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:29 crc kubenswrapper[3014]: E0320 15:20:29.015844 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:20:29 crc kubenswrapper[3014]: I0320 15:20:29.500207 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:30 crc kubenswrapper[3014]: I0320 15:20:30.498755 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:30 crc kubenswrapper[3014]: E0320 15:20:30.787547 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:20:31 crc kubenswrapper[3014]: I0320 15:20:31.499602 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:32 crc kubenswrapper[3014]: I0320 15:20:32.499296 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:33 crc kubenswrapper[3014]: I0320 15:20:33.499562 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:34 crc kubenswrapper[3014]: I0320 15:20:34.498908 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:35 crc kubenswrapper[3014]: E0320 15:20:35.131266 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:20:35 crc kubenswrapper[3014]: I0320 15:20:35.248583 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:35 crc kubenswrapper[3014]: I0320 15:20:35.250237 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:35 crc kubenswrapper[3014]: I0320 15:20:35.250395 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Mar 20 15:20:35 crc kubenswrapper[3014]: I0320 15:20:35.250511 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:35 crc kubenswrapper[3014]: I0320 15:20:35.250621 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:20:35 crc kubenswrapper[3014]: E0320 15:20:35.252416 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:20:35 crc kubenswrapper[3014]: I0320 15:20:35.499478 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.400429 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.400540 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.402369 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.402431 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.402451 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.499091 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.500100 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:20:36 crc kubenswrapper[3014]: I0320 15:20:36.500192 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:20:37 crc kubenswrapper[3014]: I0320 15:20:37.499593 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:38 crc kubenswrapper[3014]: I0320 15:20:38.500162 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: 
Mar 20 15:20:39 crc kubenswrapper[3014]: E0320 15:20:39.018176 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:20:39 crc kubenswrapper[3014]: I0320 15:20:39.499527 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:40 crc kubenswrapper[3014]: I0320 15:20:40.498572 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 15:20:40 crc kubenswrapper[3014]: I0320 15:20:40.498656 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 15:20:40 crc kubenswrapper[3014]: I0320 15:20:40.498720 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 15:20:40 crc kubenswrapper[3014]: I0320 15:20:40.498771 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 15:20:40 crc kubenswrapper[3014]: I0320 15:20:40.498815 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 15:20:40 crc kubenswrapper[3014]: I0320 15:20:40.499976 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:40 crc kubenswrapper[3014]: E0320 15:20:40.788219 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:20:41 crc kubenswrapper[3014]: I0320 15:20:41.499968 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:41 crc kubenswrapper[3014]: W0320 15:20:41.937515 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:41 crc kubenswrapper[3014]: E0320 15:20:41.937617 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:42 crc kubenswrapper[3014]: E0320 15:20:42.133366 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:20:42 crc kubenswrapper[3014]: I0320 15:20:42.252909 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:42 crc kubenswrapper[3014]: I0320 15:20:42.254480 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:42 crc kubenswrapper[3014]: I0320 15:20:42.254547 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:42 crc kubenswrapper[3014]: I0320 15:20:42.254569 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:42 crc kubenswrapper[3014]: I0320 15:20:42.254606 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:20:42 crc kubenswrapper[3014]: E0320 15:20:42.256546 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:20:42 crc kubenswrapper[3014]: I0320 15:20:42.499558 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:43 crc kubenswrapper[3014]: I0320 15:20:43.499155 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.499537 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.660864 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:36742->192.168.126.11:10357: read: connection reset by peer" start-of-body=
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.661030 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:36742->192.168.126.11:10357: read: connection reset by peer"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.661122 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.661353 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.662885 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.662943 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.662955 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.665242 3014 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"3a24a46e04162d08ebee39ec0db0c7bd7935642af1258d5ad545d9ea1e64e63b"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.665706 3014 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://3a24a46e04162d08ebee39ec0db0c7bd7935642af1258d5ad545d9ea1e64e63b" gracePeriod=30
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.930487 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/2.log"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.932855 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/1.log"
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.933911 3014 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="3a24a46e04162d08ebee39ec0db0c7bd7935642af1258d5ad545d9ea1e64e63b" exitCode=255
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.933974 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"3a24a46e04162d08ebee39ec0db0c7bd7935642af1258d5ad545d9ea1e64e63b"}
Mar 20 15:20:44 crc kubenswrapper[3014]: I0320 15:20:44.934041 3014 scope.go:117] "RemoveContainer" containerID="39030b3d95fd996fdc4b705f8aad4581bcc8f9667a038e71b04e254c7317fc11"
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.499915 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.940965 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/2.log"
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.942753 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"dfeb2836b419a932633f8eed204a0a1a8d48a7e6f8aefbfec15d62b25b20cdfb"}
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.942864 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.944147 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.944199 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:45 crc kubenswrapper[3014]: I0320 15:20:45.944224 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:46 crc kubenswrapper[3014]: I0320 15:20:46.500247 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:46 crc kubenswrapper[3014]: I0320 15:20:46.944700 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:46 crc kubenswrapper[3014]: I0320 15:20:46.945630 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:46 crc kubenswrapper[3014]: I0320 15:20:46.945688 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:46 crc kubenswrapper[3014]: I0320 15:20:46.945710 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:47 crc kubenswrapper[3014]: I0320 15:20:47.499468 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:47 crc kubenswrapper[3014]: I0320 15:20:47.666398 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:20:47 crc kubenswrapper[3014]: I0320 15:20:47.668218 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:20:47 crc kubenswrapper[3014]: I0320 15:20:47.668288 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:20:47 crc kubenswrapper[3014]: I0320 15:20:47.668318 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:20:48 crc kubenswrapper[3014]: I0320 15:20:48.500018 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:20:49 crc kubenswrapper[3014]: E0320 15:20:49.020062 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] []
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:20:49 crc kubenswrapper[3014]: E0320 15:20:49.135712 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:20:49 crc kubenswrapper[3014]: I0320 15:20:49.257596 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:49 crc kubenswrapper[3014]: I0320 15:20:49.258846 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:49 crc kubenswrapper[3014]: I0320 15:20:49.258880 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:49 crc kubenswrapper[3014]: I0320 15:20:49.258892 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:49 crc kubenswrapper[3014]: I0320 15:20:49.258914 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:20:49 crc kubenswrapper[3014]: E0320 15:20:49.260224 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:20:49 crc kubenswrapper[3014]: I0320 15:20:49.499114 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:49 crc kubenswrapper[3014]: W0320 15:20:49.906084 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:49 crc kubenswrapper[3014]: E0320 15:20:49.906169 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:50 crc kubenswrapper[3014]: W0320 15:20:50.413692 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:50 crc kubenswrapper[3014]: E0320 15:20:50.414484 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:50 crc kubenswrapper[3014]: I0320 15:20:50.500234 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:50 crc kubenswrapper[3014]: E0320 15:20:50.789111 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:20:51 crc kubenswrapper[3014]: I0320 15:20:51.500247 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:52 crc kubenswrapper[3014]: I0320 15:20:52.500023 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:53 crc kubenswrapper[3014]: I0320 15:20:53.498869 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:53 crc kubenswrapper[3014]: I0320 15:20:53.500000 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:20:53 crc kubenswrapper[3014]: I0320 15:20:53.500240 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:53 crc kubenswrapper[3014]: I0320 15:20:53.502824 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:53 crc kubenswrapper[3014]: I0320 15:20:53.502911 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:53 crc kubenswrapper[3014]: I0320 15:20:53.502934 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:54 crc kubenswrapper[3014]: I0320 15:20:54.500091 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:55 crc kubenswrapper[3014]: I0320 15:20:55.348478 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:20:55 crc kubenswrapper[3014]: I0320 15:20:55.348648 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:55 crc kubenswrapper[3014]: I0320 15:20:55.349592 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:55 crc kubenswrapper[3014]: I0320 15:20:55.349635 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:55 crc kubenswrapper[3014]: I0320 15:20:55.349648 3014 
kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:55 crc kubenswrapper[3014]: I0320 15:20:55.499960 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:56 crc kubenswrapper[3014]: E0320 15:20:56.138012 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.261222 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.262796 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.262856 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.262874 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.262908 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:20:56 crc kubenswrapper[3014]: E0320 15:20:56.264413 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.499564 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.500574 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:20:56 crc kubenswrapper[3014]: I0320 15:20:56.500666 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:20:57 crc kubenswrapper[3014]: I0320 15:20:57.500063 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:58 crc kubenswrapper[3014]: I0320 15:20:58.499375 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: 
lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:20:59 crc kubenswrapper[3014]: E0320 15:20:59.022968 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:20:59 crc kubenswrapper[3014]: I0320 15:20:59.499939 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:00 crc kubenswrapper[3014]: I0320 15:21:00.499686 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:00 crc kubenswrapper[3014]: E0320 15:21:00.789294 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:21:01 crc kubenswrapper[3014]: I0320 15:21:01.499860 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:02 crc kubenswrapper[3014]: I0320 15:21:02.498888 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:03 crc kubenswrapper[3014]: E0320 15:21:03.140263 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:03 crc kubenswrapper[3014]: I0320 15:21:03.264788 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:03 crc kubenswrapper[3014]: I0320 15:21:03.266570 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:03 crc kubenswrapper[3014]: I0320 15:21:03.266642 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:03 crc kubenswrapper[3014]: I0320 15:21:03.266668 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:03 crc kubenswrapper[3014]: I0320 15:21:03.266714 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:03 crc kubenswrapper[3014]: E0320 
15:21:03.268281 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:03 crc kubenswrapper[3014]: I0320 15:21:03.499005 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:03 crc kubenswrapper[3014]: W0320 15:21:03.693652 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:03 crc kubenswrapper[3014]: E0320 15:21:03.693754 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:04 crc kubenswrapper[3014]: I0320 15:21:04.499190 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:05 crc kubenswrapper[3014]: I0320 15:21:05.500303 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:06 crc kubenswrapper[3014]: I0320 15:21:06.499995 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:06 crc kubenswrapper[3014]: I0320 15:21:06.500107 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:21:06 crc kubenswrapper[3014]: I0320 15:21:06.500206 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:21:07 crc kubenswrapper[3014]: I0320 15:21:07.499087 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:08 crc kubenswrapper[3014]: I0320 15:21:08.499744 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 
199.204.44.24:53: no such host Mar 20 15:21:09 crc kubenswrapper[3014]: E0320 15:21:09.025798 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:09 crc kubenswrapper[3014]: I0320 15:21:09.500012 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:10 crc kubenswrapper[3014]: E0320 15:21:10.142819 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:10 crc kubenswrapper[3014]: I0320 15:21:10.268995 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:10 crc kubenswrapper[3014]: I0320 15:21:10.270524 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:10 crc kubenswrapper[3014]: I0320 15:21:10.270587 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:10 crc kubenswrapper[3014]: I0320 15:21:10.270608 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:10 crc kubenswrapper[3014]: I0320 15:21:10.270649 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:10 crc kubenswrapper[3014]: E0320 15:21:10.277097 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:10 crc kubenswrapper[3014]: I0320 15:21:10.499889 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:10 crc kubenswrapper[3014]: E0320 15:21:10.789925 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:21:11 crc kubenswrapper[3014]: I0320 15:21:11.499295 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:12 crc kubenswrapper[3014]: I0320 15:21:12.499766 3014 csi_plugin.go:880] Failed to contact API 
server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:13 crc kubenswrapper[3014]: I0320 15:21:13.500133 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:13 crc kubenswrapper[3014]: I0320 15:21:13.666722 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:13 crc kubenswrapper[3014]: I0320 15:21:13.668098 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:13 crc kubenswrapper[3014]: I0320 15:21:13.668159 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:13 crc kubenswrapper[3014]: I0320 15:21:13.668170 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:14 crc kubenswrapper[3014]: I0320 15:21:14.499782 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.499760 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.578916 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:51570->192.168.126.11:10357: read: connection reset by peer" start-of-body= Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.579043 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:51570->192.168.126.11:10357: read: connection reset by peer" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.579130 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.579387 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.581761 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.581843 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.581865 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.585656 3014 
kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"dfeb2836b419a932633f8eed204a0a1a8d48a7e6f8aefbfec15d62b25b20cdfb"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 20 15:21:15 crc kubenswrapper[3014]: I0320 15:21:15.586238 3014 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://dfeb2836b419a932633f8eed204a0a1a8d48a7e6f8aefbfec15d62b25b20cdfb" gracePeriod=30 Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.041583 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/3.log" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.042681 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/2.log" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.044775 3014 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="dfeb2836b419a932633f8eed204a0a1a8d48a7e6f8aefbfec15d62b25b20cdfb" exitCode=255 Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.044840 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"dfeb2836b419a932633f8eed204a0a1a8d48a7e6f8aefbfec15d62b25b20cdfb"} Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.044920 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"e1f65d88ca5f0789b0758c62b7911deba1239f751f4138bb42e2e1c007aa7d2b"} Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.044964 3014 scope.go:117] "RemoveContainer" containerID="3a24a46e04162d08ebee39ec0db0c7bd7935642af1258d5ad545d9ea1e64e63b" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.045064 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.046246 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.046310 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.046365 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:16 crc kubenswrapper[3014]: I0320 15:21:16.499412 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.049306 3014 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/3.log" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.050634 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.051289 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.051368 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.051388 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:17 crc kubenswrapper[3014]: E0320 15:21:17.145513 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.278022 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.279664 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.279718 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.279739 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.279776 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:17 crc kubenswrapper[3014]: E0320 15:21:17.281401 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:17 crc kubenswrapper[3014]: I0320 15:21:17.499483 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:18 crc kubenswrapper[3014]: I0320 15:21:18.499936 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:19 crc kubenswrapper[3014]: E0320 15:21:19.027679 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC 
m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:19 crc kubenswrapper[3014]: I0320 15:21:19.499076 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:20 crc kubenswrapper[3014]: I0320 15:21:20.499096 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:20 crc kubenswrapper[3014]: E0320 15:21:20.791244 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:21:21 crc kubenswrapper[3014]: I0320 15:21:21.499929 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:22 crc kubenswrapper[3014]: I0320 15:21:22.499649 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:23 crc kubenswrapper[3014]: I0320 15:21:23.499868 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:21:23 crc kubenswrapper[3014]: I0320 15:21:23.500198 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:23 crc kubenswrapper[3014]: I0320 15:21:23.501883 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:23 crc kubenswrapper[3014]: I0320 15:21:23.501945 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:23 crc kubenswrapper[3014]: I0320 15:21:23.501965 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:23 crc kubenswrapper[3014]: I0320 15:21:23.503341 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:24 crc kubenswrapper[3014]: E0320 15:21:24.148122 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:24 crc kubenswrapper[3014]: I0320 15:21:24.282372 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:24 crc kubenswrapper[3014]: I0320 15:21:24.283810 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:24 crc kubenswrapper[3014]: I0320 15:21:24.283864 
3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:24 crc kubenswrapper[3014]: I0320 15:21:24.283880 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:24 crc kubenswrapper[3014]: I0320 15:21:24.283920 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:24 crc kubenswrapper[3014]: E0320 15:21:24.285377 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:24 crc kubenswrapper[3014]: I0320 15:21:24.499645 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:25 crc kubenswrapper[3014]: I0320 15:21:25.348163 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:21:25 crc kubenswrapper[3014]: I0320 15:21:25.348418 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:25 crc kubenswrapper[3014]: I0320 15:21:25.349989 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:25 crc kubenswrapper[3014]: I0320 15:21:25.350049 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:25 crc kubenswrapper[3014]: I0320 15:21:25.350070 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:25 crc kubenswrapper[3014]: I0320 15:21:25.499439 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:26 crc kubenswrapper[3014]: I0320 15:21:26.499317 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:26 crc kubenswrapper[3014]: I0320 15:21:26.500302 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:21:26 crc kubenswrapper[3014]: I0320 15:21:26.500397 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:21:27 crc kubenswrapper[3014]: I0320 15:21:27.500116 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:27 crc kubenswrapper[3014]: I0320 15:21:27.666262 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:27 crc kubenswrapper[3014]: I0320 15:21:27.667698 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:27 crc kubenswrapper[3014]: I0320 15:21:27.667762 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:27 crc kubenswrapper[3014]: I0320 15:21:27.667789 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:28 crc kubenswrapper[3014]: I0320 15:21:28.499798 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:29 crc kubenswrapper[3014]: E0320 15:21:29.030461 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:29 crc kubenswrapper[3014]: E0320 15:21:29.030570 3014 event.go:294] "Unable to write event (retry limit exceeded!)" event="&Event{ObjectMeta:{crc.189e95c5e9c67485 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,LastTimestamp:2026-03-20 15:19:40.493747333 +0000 UTC m=+0.934747685,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:29 crc kubenswrapper[3014]: E0320 15:21:29.032076 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 
00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:29 crc kubenswrapper[3014]: I0320 15:21:29.501261 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:30 crc kubenswrapper[3014]: I0320 15:21:30.500203 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:30 crc kubenswrapper[3014]: E0320 15:21:30.792093 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:21:31 crc kubenswrapper[3014]: E0320 15:21:31.150727 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:31 crc kubenswrapper[3014]: I0320 15:21:31.286281 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:31 crc kubenswrapper[3014]: I0320 15:21:31.288485 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:31 crc kubenswrapper[3014]: I0320 15:21:31.288551 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:31 crc kubenswrapper[3014]: I0320 15:21:31.288575 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:31 crc kubenswrapper[3014]: I0320 15:21:31.288619 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:31 crc kubenswrapper[3014]: E0320 15:21:31.290315 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:31 crc kubenswrapper[3014]: I0320 15:21:31.500279 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:31 crc kubenswrapper[3014]: W0320 15:21:31.570171 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:31 crc kubenswrapper[3014]: E0320 15:21:31.570264 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:32 crc kubenswrapper[3014]: I0320 15:21:32.499173 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:32 crc kubenswrapper[3014]: E0320 15:21:32.925746 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:33 crc kubenswrapper[3014]: I0320 15:21:33.499548 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:34 crc kubenswrapper[3014]: I0320 15:21:34.499579 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:35 crc kubenswrapper[3014]: I0320 15:21:35.499827 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:36 crc kubenswrapper[3014]: I0320 15:21:36.500022 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:21:36 crc kubenswrapper[3014]: I0320 15:21:36.500081 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:36 crc kubenswrapper[3014]: I0320 15:21:36.500124 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:21:37 crc kubenswrapper[3014]: I0320 15:21:37.499275 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:37 crc kubenswrapper[3014]: I0320 15:21:37.666967 3014 kubelet_node_status.go:402] "Setting node 
annotation to enable volume controller attach/detach" Mar 20 15:21:37 crc kubenswrapper[3014]: I0320 15:21:37.668950 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:37 crc kubenswrapper[3014]: I0320 15:21:37.669123 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:37 crc kubenswrapper[3014]: I0320 15:21:37.669234 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:38 crc kubenswrapper[3014]: E0320 15:21:38.153014 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:38 crc kubenswrapper[3014]: I0320 15:21:38.291289 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:38 crc kubenswrapper[3014]: I0320 15:21:38.292767 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:38 crc kubenswrapper[3014]: I0320 15:21:38.292887 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:38 crc kubenswrapper[3014]: I0320 15:21:38.292966 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:38 crc kubenswrapper[3014]: I0320 15:21:38.293064 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:38 crc kubenswrapper[3014]: E0320 15:21:38.294472 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:38 crc kubenswrapper[3014]: I0320 15:21:38.499611 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:39 crc kubenswrapper[3014]: I0320 15:21:39.499495 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:39 crc kubenswrapper[3014]: W0320 15:21:39.696554 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:39 crc kubenswrapper[3014]: E0320 15:21:39.696628 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:40 crc kubenswrapper[3014]: I0320 15:21:40.498977 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:21:40 crc 
kubenswrapper[3014]: I0320 15:21:40.499033 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:21:40 crc kubenswrapper[3014]: I0320 15:21:40.499077 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:21:40 crc kubenswrapper[3014]: I0320 15:21:40.499113 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:21:40 crc kubenswrapper[3014]: I0320 15:21:40.499142 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:21:40 crc kubenswrapper[3014]: I0320 15:21:40.500076 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:40 crc kubenswrapper[3014]: E0320 15:21:40.793041 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:21:41 crc kubenswrapper[3014]: I0320 15:21:41.499434 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:41 crc kubenswrapper[3014]: W0320 15:21:41.653870 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:41 crc kubenswrapper[3014]: E0320 15:21:41.654787 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:42 crc kubenswrapper[3014]: I0320 15:21:42.500450 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:42 crc kubenswrapper[3014]: E0320 15:21:42.927850 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:43 crc kubenswrapper[3014]: I0320 15:21:43.499553 3014 csi_plugin.go:880] Failed to 
contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:44 crc kubenswrapper[3014]: I0320 15:21:44.500280 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:45 crc kubenswrapper[3014]: E0320 15:21:45.155902 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:45 crc kubenswrapper[3014]: I0320 15:21:45.294981 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:45 crc kubenswrapper[3014]: I0320 15:21:45.296381 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:45 crc kubenswrapper[3014]: I0320 15:21:45.296420 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:45 crc kubenswrapper[3014]: I0320 15:21:45.296433 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:45 crc kubenswrapper[3014]: I0320 15:21:45.296458 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:45 crc kubenswrapper[3014]: E0320 15:21:45.298872 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:45 crc kubenswrapper[3014]: I0320 15:21:45.499683 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.486624 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:60960->192.168.126.11:10357: read: connection reset by peer" start-of-body= Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.486714 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": read tcp 192.168.126.11:60960->192.168.126.11:10357: read: connection reset by peer" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.486750 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.486853 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.487687 3014 kubelet_node_status.go:729] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.487704 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.487712 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.489357 3014 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"e1f65d88ca5f0789b0758c62b7911deba1239f751f4138bb42e2e1c007aa7d2b"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.489696 3014 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://e1f65d88ca5f0789b0758c62b7911deba1239f751f4138bb42e2e1c007aa7d2b" gracePeriod=30 Mar 20 15:21:46 crc kubenswrapper[3014]: I0320 15:21:46.499500 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.137991 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/4.log" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.138988 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/3.log" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.140489 3014 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="e1f65d88ca5f0789b0758c62b7911deba1239f751f4138bb42e2e1c007aa7d2b" exitCode=255 Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.140545 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"e1f65d88ca5f0789b0758c62b7911deba1239f751f4138bb42e2e1c007aa7d2b"} Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.140584 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"} Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.140612 3014 scope.go:117] "RemoveContainer" containerID="dfeb2836b419a932633f8eed204a0a1a8d48a7e6f8aefbfec15d62b25b20cdfb" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.140780 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.142770 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.142812 3014 
kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.142838 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:47 crc kubenswrapper[3014]: I0320 15:21:47.500144 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:48 crc kubenswrapper[3014]: I0320 15:21:48.145685 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/4.log" Mar 20 15:21:48 crc kubenswrapper[3014]: I0320 15:21:48.499551 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:49 crc kubenswrapper[3014]: W0320 15:21:49.227694 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:49 crc kubenswrapper[3014]: E0320 15:21:49.227810 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:49 crc kubenswrapper[3014]: I0320 15:21:49.500717 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:50 crc kubenswrapper[3014]: I0320 15:21:50.500155 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:50 crc kubenswrapper[3014]: E0320 15:21:50.793577 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:21:51 crc kubenswrapper[3014]: I0320 15:21:51.499937 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:52 crc kubenswrapper[3014]: E0320 15:21:52.157652 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:52 crc kubenswrapper[3014]: I0320 15:21:52.299575 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:52 crc kubenswrapper[3014]: I0320 15:21:52.301067 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Mar 20 15:21:52 crc kubenswrapper[3014]: I0320 15:21:52.301100 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:52 crc kubenswrapper[3014]: I0320 15:21:52.301110 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:52 crc kubenswrapper[3014]: I0320 15:21:52.301136 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:52 crc kubenswrapper[3014]: E0320 15:21:52.302403 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:52 crc kubenswrapper[3014]: I0320 15:21:52.499645 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:52 crc kubenswrapper[3014]: E0320 15:21:52.930102 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:21:53 crc kubenswrapper[3014]: I0320 15:21:53.499451 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:53 crc kubenswrapper[3014]: I0320 15:21:53.499576 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:21:53 crc kubenswrapper[3014]: I0320 15:21:53.499724 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:53 crc kubenswrapper[3014]: I0320 15:21:53.500742 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:53 crc kubenswrapper[3014]: I0320 15:21:53.500787 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:53 crc kubenswrapper[3014]: I0320 15:21:53.500799 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:54 crc kubenswrapper[3014]: I0320 15:21:54.499286 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:55 crc kubenswrapper[3014]: I0320 
15:21:55.347979 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:21:55 crc kubenswrapper[3014]: I0320 15:21:55.348252 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:55 crc kubenswrapper[3014]: I0320 15:21:55.349679 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:21:55 crc kubenswrapper[3014]: I0320 15:21:55.349725 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:55 crc kubenswrapper[3014]: I0320 15:21:55.349741 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:55 crc kubenswrapper[3014]: I0320 15:21:55.500369 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:56 crc kubenswrapper[3014]: I0320 15:21:56.498907 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:56 crc kubenswrapper[3014]: I0320 15:21:56.499920 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:21:56 crc kubenswrapper[3014]: I0320 15:21:56.500024 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:21:57 crc kubenswrapper[3014]: I0320 15:21:57.499896 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:58 crc kubenswrapper[3014]: I0320 15:21:58.500505 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:21:59 crc kubenswrapper[3014]: E0320 15:21:59.159939 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:21:59 crc kubenswrapper[3014]: I0320 15:21:59.303564 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:21:59 crc kubenswrapper[3014]: I0320 15:21:59.305562 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Mar 20 15:21:59 crc kubenswrapper[3014]: I0320 15:21:59.305606 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:21:59 crc kubenswrapper[3014]: I0320 15:21:59.305620 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:21:59 crc kubenswrapper[3014]: I0320 15:21:59.305652 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:21:59 crc kubenswrapper[3014]: E0320 15:21:59.307241 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:21:59 crc kubenswrapper[3014]: I0320 15:21:59.498961 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:00 crc kubenswrapper[3014]: I0320 15:22:00.499045 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:00 crc kubenswrapper[3014]: E0320 15:22:00.794068 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:22:01 crc kubenswrapper[3014]: I0320 15:22:01.499496 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:02 crc kubenswrapper[3014]: I0320 15:22:02.499561 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:02 crc kubenswrapper[3014]: E0320 15:22:02.932460 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:22:03 crc kubenswrapper[3014]: I0320 15:22:03.499540 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:04 crc kubenswrapper[3014]: I0320 15:22:04.499796 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode 
publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:05 crc kubenswrapper[3014]: I0320 15:22:05.499872 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:06 crc kubenswrapper[3014]: E0320 15:22:06.161949 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.307962 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.309403 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.309449 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.309463 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.309493 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:22:06 crc kubenswrapper[3014]: E0320 15:22:06.310978 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.498982 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.500056 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:22:06 crc kubenswrapper[3014]: I0320 15:22:06.500149 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:22:07 crc kubenswrapper[3014]: I0320 15:22:07.501830 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:08 crc kubenswrapper[3014]: I0320 15:22:08.502814 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:09 crc kubenswrapper[3014]: I0320 15:22:09.503624 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:10 crc kubenswrapper[3014]: I0320 15:22:10.507830 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:10 crc kubenswrapper[3014]: E0320 15:22:10.795091 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:22:11 crc kubenswrapper[3014]: I0320 15:22:11.500020 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:12 crc kubenswrapper[3014]: I0320 15:22:12.500084 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:12 crc kubenswrapper[3014]: W0320 15:22:12.817241 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:12 crc kubenswrapper[3014]: E0320 15:22:12.817382 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:12 crc kubenswrapper[3014]: E0320 15:22:12.934839 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:22:13 crc kubenswrapper[3014]: E0320 15:22:13.163545 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:22:13 crc 
kubenswrapper[3014]: I0320 15:22:13.311884 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.313271 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.313301 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.313312 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.313357 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:22:13 crc kubenswrapper[3014]: E0320 15:22:13.314671 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.499745 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.666292 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.667486 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.667563 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:13 crc kubenswrapper[3014]: I0320 15:22:13.667577 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:14 crc kubenswrapper[3014]: W0320 15:22:14.289098 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:14 crc kubenswrapper[3014]: E0320 15:22:14.289170 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:14 crc kubenswrapper[3014]: I0320 15:22:14.499398 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:15 crc kubenswrapper[3014]: I0320 15:22:15.499075 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.499716 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc 
container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.499753 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.500580 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.500658 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.500792 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.501862 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.501900 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.501914 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.503873 3014 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Mar 20 15:22:16 crc kubenswrapper[3014]: I0320 15:22:16.504273 3014 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2" gracePeriod=30 Mar 20 15:22:16 crc kubenswrapper[3014]: E0320 15:22:16.598547 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.227242 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/5.log" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 
15:22:17.227710 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/4.log" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.228661 3014 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2" exitCode=255 Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.228722 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"} Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.228764 3014 scope.go:117] "RemoveContainer" containerID="e1f65d88ca5f0789b0758c62b7911deba1239f751f4138bb42e2e1c007aa7d2b" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.228856 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.229676 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.229709 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.229721 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.231143 3014 scope.go:117] "RemoveContainer" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2" Mar 20 15:22:17 crc kubenswrapper[3014]: E0320 15:22:17.231848 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" Mar 20 15:22:17 crc kubenswrapper[3014]: I0320 15:22:17.499415 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:18 crc kubenswrapper[3014]: I0320 15:22:18.232969 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/5.log" Mar 20 15:22:18 crc kubenswrapper[3014]: I0320 15:22:18.498837 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:19 crc kubenswrapper[3014]: I0320 15:22:19.618013 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:19 crc kubenswrapper[3014]: W0320 15:22:19.886695 3014 
reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:19 crc kubenswrapper[3014]: E0320 15:22:19.886800 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:20 crc kubenswrapper[3014]: E0320 15:22:20.165869 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:22:20 crc kubenswrapper[3014]: I0320 15:22:20.314822 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:20 crc kubenswrapper[3014]: I0320 15:22:20.316389 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:20 crc kubenswrapper[3014]: I0320 15:22:20.316533 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:20 crc kubenswrapper[3014]: I0320 15:22:20.316613 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:20 crc kubenswrapper[3014]: I0320 15:22:20.316699 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:22:20 crc kubenswrapper[3014]: E0320 15:22:20.318377 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:22:20 crc kubenswrapper[3014]: I0320 15:22:20.499549 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:20 crc kubenswrapper[3014]: E0320 15:22:20.795524 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:22:21 crc kubenswrapper[3014]: W0320 15:22:21.488435 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:21 crc kubenswrapper[3014]: E0320 15:22:21.488531 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:21 crc kubenswrapper[3014]: I0320 15:22:21.499771 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 
199.204.44.24:53: no such host Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.499237 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.736724 3014 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.736903 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.738129 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.738171 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.738183 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:22 crc kubenswrapper[3014]: I0320 15:22:22.739431 3014 scope.go:117] "RemoveContainer" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2" Mar 20 15:22:22 crc kubenswrapper[3014]: E0320 15:22:22.739980 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" Mar 20 15:22:22 crc kubenswrapper[3014]: E0320 15:22:22.936660 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:22:23 crc kubenswrapper[3014]: I0320 15:22:23.499421 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:24 crc kubenswrapper[3014]: I0320 15:22:24.499714 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:25 crc kubenswrapper[3014]: I0320 15:22:25.499826 3014 csi_plugin.go:880] Failed to contact API server when waiting for 
CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:26 crc kubenswrapper[3014]: I0320 15:22:26.499605 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:26 crc kubenswrapper[3014]: I0320 15:22:26.666579 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:26 crc kubenswrapper[3014]: I0320 15:22:26.667696 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:26 crc kubenswrapper[3014]: I0320 15:22:26.667727 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:26 crc kubenswrapper[3014]: I0320 15:22:26.667735 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:27 crc kubenswrapper[3014]: E0320 15:22:27.168779 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s" Mar 20 15:22:27 crc kubenswrapper[3014]: I0320 15:22:27.319397 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:22:27 crc kubenswrapper[3014]: I0320 15:22:27.320852 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:22:27 crc kubenswrapper[3014]: I0320 15:22:27.320909 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:22:27 crc kubenswrapper[3014]: I0320 15:22:27.320936 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:22:27 crc kubenswrapper[3014]: I0320 15:22:27.320984 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:22:27 crc kubenswrapper[3014]: E0320 15:22:27.322670 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc" Mar 20 15:22:27 crc kubenswrapper[3014]: I0320 15:22:27.499258 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:28 crc kubenswrapper[3014]: I0320 15:22:28.499621 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:29 crc kubenswrapper[3014]: I0320 15:22:29.518638 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host Mar 20 15:22:30 crc kubenswrapper[3014]: I0320 15:22:30.499554 
3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:30 crc kubenswrapper[3014]: E0320 15:22:30.796014 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:22:31 crc kubenswrapper[3014]: I0320 15:22:31.499805 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:32 crc kubenswrapper[3014]: I0320 15:22:32.499677 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:32 crc kubenswrapper[3014]: E0320 15:22:32.938692 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:22:33 crc kubenswrapper[3014]: I0320 15:22:33.499220 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:34 crc kubenswrapper[3014]: E0320 15:22:34.170911 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:22:34 crc kubenswrapper[3014]: I0320 15:22:34.323144 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:34 crc kubenswrapper[3014]: I0320 15:22:34.324671 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:34 crc kubenswrapper[3014]: I0320 15:22:34.324722 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:34 crc kubenswrapper[3014]: I0320 15:22:34.324744 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:34 crc kubenswrapper[3014]: I0320 15:22:34.324777 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:22:34 crc kubenswrapper[3014]: E0320 15:22:34.326251 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:22:34 crc kubenswrapper[3014]: I0320 15:22:34.499551 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:35 crc kubenswrapper[3014]: I0320 15:22:35.499542 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:35 crc kubenswrapper[3014]: I0320 15:22:35.666289 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:35 crc kubenswrapper[3014]: I0320 15:22:35.667664 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:35 crc kubenswrapper[3014]: I0320 15:22:35.667799 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:35 crc kubenswrapper[3014]: I0320 15:22:35.667889 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:35 crc kubenswrapper[3014]: I0320 15:22:35.669508 3014 scope.go:117] "RemoveContainer" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"
Mar 20 15:22:35 crc kubenswrapper[3014]: E0320 15:22:35.670666 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa"
Mar 20 15:22:36 crc kubenswrapper[3014]: I0320 15:22:36.499992 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:37 crc kubenswrapper[3014]: I0320 15:22:37.499567 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:38 crc kubenswrapper[3014]: I0320 15:22:38.499258 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:39 crc kubenswrapper[3014]: I0320 15:22:39.499765 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:40 crc kubenswrapper[3014]: I0320 15:22:40.499882 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 15:22:40 crc kubenswrapper[3014]: I0320 15:22:40.499983 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 15:22:40 crc kubenswrapper[3014]: I0320 15:22:40.500031 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 15:22:40 crc kubenswrapper[3014]: I0320 15:22:40.500068 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 15:22:40 crc kubenswrapper[3014]: I0320 15:22:40.500099 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 15:22:40 crc kubenswrapper[3014]: I0320 15:22:40.500482 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:40 crc kubenswrapper[3014]: E0320 15:22:40.797042 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:22:41 crc kubenswrapper[3014]: E0320 15:22:41.172657 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:22:41 crc kubenswrapper[3014]: I0320 15:22:41.327507 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:41 crc kubenswrapper[3014]: I0320 15:22:41.329524 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:41 crc kubenswrapper[3014]: I0320 15:22:41.329664 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:41 crc kubenswrapper[3014]: I0320 15:22:41.329778 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:41 crc kubenswrapper[3014]: I0320 15:22:41.329910 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:22:41 crc kubenswrapper[3014]: E0320 15:22:41.331511 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:22:41 crc kubenswrapper[3014]: I0320 15:22:41.499607 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:42 crc kubenswrapper[3014]: I0320 15:22:42.500214 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:42 crc kubenswrapper[3014]: E0320 15:22:42.940911 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:22:43 crc kubenswrapper[3014]: I0320 15:22:43.499551 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:44 crc kubenswrapper[3014]: I0320 15:22:44.500001 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:44 crc kubenswrapper[3014]: I0320 15:22:44.667128 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:44 crc kubenswrapper[3014]: I0320 15:22:44.668973 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:44 crc kubenswrapper[3014]: I0320 15:22:44.669039 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:44 crc kubenswrapper[3014]: I0320 15:22:44.669068 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:45 crc kubenswrapper[3014]: I0320 15:22:45.541794 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:46 crc kubenswrapper[3014]: I0320 15:22:46.499847 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:47 crc kubenswrapper[3014]: I0320 15:22:47.499777 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:47 crc kubenswrapper[3014]: I0320 15:22:47.667114 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:47 crc kubenswrapper[3014]: I0320 15:22:47.669181 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:47 crc kubenswrapper[3014]: I0320 15:22:47.669307 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:47 crc kubenswrapper[3014]: I0320 15:22:47.669385 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:48 crc kubenswrapper[3014]: E0320 15:22:48.175129 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.331880 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.333459 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.333572 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.333608 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.333669 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:22:48 crc kubenswrapper[3014]: E0320 15:22:48.335669 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.499259 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.665927 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.666847 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.666879 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.666891 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:48 crc kubenswrapper[3014]: I0320 15:22:48.668159 3014 scope.go:117] "RemoveContainer" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"
Mar 20 15:22:48 crc kubenswrapper[3014]: E0320 15:22:48.668779 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa"
Mar 20 15:22:49 crc kubenswrapper[3014]: I0320 15:22:49.499654 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:50 crc kubenswrapper[3014]: E0320 15:22:50.798366 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:22:54 crc kubenswrapper[3014]: I0320 15:22:54.724777 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.47.54:53: no such host
Mar 20 15:22:54 crc kubenswrapper[3014]: E0320 15:22:54.724831 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.47.54:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:22:55 crc kubenswrapper[3014]: E0320 15:22:55.177932 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:22:55 crc kubenswrapper[3014]: I0320 15:22:55.336063 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:22:55 crc kubenswrapper[3014]: I0320 15:22:55.338738 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:22:55 crc kubenswrapper[3014]: I0320 15:22:55.338804 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:22:55 crc kubenswrapper[3014]: I0320 15:22:55.338839 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:22:55 crc kubenswrapper[3014]: I0320 15:22:55.338893 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:22:55 crc kubenswrapper[3014]: E0320 15:22:55.340308 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:22:55 crc kubenswrapper[3014]: I0320 15:22:55.499719 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:56 crc kubenswrapper[3014]: W0320 15:22:56.485729 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:56 crc kubenswrapper[3014]: E0320 15:22:56.486222 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:56 crc kubenswrapper[3014]: I0320 15:22:56.499368 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:57 crc kubenswrapper[3014]: I0320 15:22:57.499775 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:58 crc kubenswrapper[3014]: I0320 15:22:58.500255 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:22:59 crc kubenswrapper[3014]: I0320 15:22:59.499553 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:00 crc kubenswrapper[3014]: I0320 15:23:00.499581 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:00 crc kubenswrapper[3014]: I0320 15:23:00.666734 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:00 crc kubenswrapper[3014]: I0320 15:23:00.668493 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:00 crc kubenswrapper[3014]: I0320 15:23:00.668552 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:00 crc kubenswrapper[3014]: I0320 15:23:00.668572 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:00 crc kubenswrapper[3014]: I0320 15:23:00.671177 3014 scope.go:117] "RemoveContainer" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"
Mar 20 15:23:00 crc kubenswrapper[3014]: E0320 15:23:00.800143 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:23:01 crc kubenswrapper[3014]: W0320 15:23:01.343433 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:01 crc kubenswrapper[3014]: E0320 15:23:01.343499 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.346406 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/5.log"
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.347494 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5"}
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.347612 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.348696 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.348784 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.348811 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:01 crc kubenswrapper[3014]: I0320 15:23:01.499965 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:02 crc kubenswrapper[3014]: E0320 15:23:02.180177 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:23:02 crc kubenswrapper[3014]: I0320 15:23:02.340940 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:02 crc kubenswrapper[3014]: I0320 15:23:02.342629 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:02 crc kubenswrapper[3014]: I0320 15:23:02.342716 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:02 crc kubenswrapper[3014]: I0320 15:23:02.342743 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:02 crc kubenswrapper[3014]: I0320 15:23:02.342787 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:23:02 crc kubenswrapper[3014]: E0320 15:23:02.344578 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:23:02 crc kubenswrapper[3014]: I0320 15:23:02.499580 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:03 crc kubenswrapper[3014]: I0320 15:23:03.499634 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:23:03 crc kubenswrapper[3014]: I0320 15:23:03.499666 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:03 crc kubenswrapper[3014]: I0320 15:23:03.499763 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:03 crc kubenswrapper[3014]: I0320 15:23:03.500886 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:03 crc kubenswrapper[3014]: I0320 15:23:03.500946 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:03 crc kubenswrapper[3014]: I0320 15:23:03.500974 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:04 crc kubenswrapper[3014]: I0320 15:23:04.499276 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:04 crc kubenswrapper[3014]: E0320 15:23:04.727478 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:23:04 crc kubenswrapper[3014]: W0320 15:23:04.762809 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:04 crc kubenswrapper[3014]: E0320 15:23:04.762914 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:05 crc kubenswrapper[3014]: I0320 15:23:05.348535 3014 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:23:05 crc kubenswrapper[3014]: I0320 15:23:05.348721 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:05 crc kubenswrapper[3014]: I0320 15:23:05.350217 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:05 crc kubenswrapper[3014]: I0320 15:23:05.350267 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:05 crc kubenswrapper[3014]: I0320 15:23:05.350286 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:05 crc kubenswrapper[3014]: I0320 15:23:05.499119 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:06 crc kubenswrapper[3014]: I0320 15:23:06.500118 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:23:06 crc kubenswrapper[3014]: I0320 15:23:06.500574 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:23:06 crc kubenswrapper[3014]: I0320 15:23:06.500178 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:07 crc kubenswrapper[3014]: I0320 15:23:07.499865 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:08 crc kubenswrapper[3014]: I0320 15:23:08.500078 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:09 crc kubenswrapper[3014]: E0320 15:23:09.182666 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:23:09 crc kubenswrapper[3014]: I0320 15:23:09.345628 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:09 crc kubenswrapper[3014]: I0320 15:23:09.347430 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:09 crc kubenswrapper[3014]: I0320 15:23:09.347498 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:09 crc kubenswrapper[3014]: I0320 15:23:09.347525 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:09 crc kubenswrapper[3014]: I0320 15:23:09.347583 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:23:09 crc kubenswrapper[3014]: E0320 15:23:09.348824 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:23:09 crc kubenswrapper[3014]: I0320 15:23:09.499843 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:10 crc kubenswrapper[3014]: I0320 15:23:10.498824 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:10 crc kubenswrapper[3014]: W0320 15:23:10.516224 3014 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:10 crc kubenswrapper[3014]: E0320 15:23:10.516285 3014 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:10 crc kubenswrapper[3014]: E0320 15:23:10.800520 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:23:11 crc kubenswrapper[3014]: I0320 15:23:11.500295 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:12 crc kubenswrapper[3014]: I0320 15:23:12.500069 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:13 crc kubenswrapper[3014]: I0320 15:23:13.499994 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:14 crc kubenswrapper[3014]: I0320 15:23:14.498991 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:14 crc kubenswrapper[3014]: E0320 15:23:14.729951 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:23:14 crc kubenswrapper[3014]: E0320 15:23:14.730087 3014 event.go:294] "Unable to write event (retry limit exceeded!)" event="&Event{ObjectMeta:{crc.189e95c5ee4b7ef9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,LastTimestamp:2026-03-20 15:19:40.569575161 +0000 UTC m=+1.010575513,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:23:14 crc kubenswrapper[3014]: E0320 15:23:14.731584 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4bd43f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569596991 +0000 UTC m=+1.010597343,LastTimestamp:2026-03-20 15:19:40.569596991 +0000 UTC m=+1.010597343,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:23:15 crc kubenswrapper[3014]: I0320 15:23:15.499854 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:16 crc kubenswrapper[3014]: E0320 15:23:16.184589 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.350020 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.351435 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.351469 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.351482 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.351505 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:23:16 crc kubenswrapper[3014]: E0320 15:23:16.352680 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.499856 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.500954 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:23:16 crc kubenswrapper[3014]: I0320 15:23:16.501119 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:23:17 crc kubenswrapper[3014]: I0320 15:23:17.499729 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:18 crc kubenswrapper[3014]: I0320 15:23:18.499914 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:19 crc kubenswrapper[3014]: I0320 15:23:19.500007 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:20 crc kubenswrapper[3014]: I0320 15:23:20.499886 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:20 crc kubenswrapper[3014]: E0320 15:23:20.801250 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:23:21 crc kubenswrapper[3014]: E0320 15:23:21.459703 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4bd43f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569596991 +0000 UTC m=+1.010597343,LastTimestamp:2026-03-20 15:19:40.569596991 +0000 UTC m=+1.010597343,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:23:21 crc kubenswrapper[3014]: I0320 15:23:21.499367 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:22 crc kubenswrapper[3014]: I0320 15:23:22.499734 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:22 crc kubenswrapper[3014]: I0320 15:23:22.666074 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:22 crc kubenswrapper[3014]: I0320 15:23:22.667663 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:22 crc kubenswrapper[3014]: I0320 15:23:22.667760 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:22 crc kubenswrapper[3014]: I0320 15:23:22.667825 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:23 crc kubenswrapper[3014]: E0320 15:23:23.186649 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:23:23 crc kubenswrapper[3014]: I0320 15:23:23.353488 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:23 crc kubenswrapper[3014]: I0320 15:23:23.355554 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:23 crc kubenswrapper[3014]: I0320 15:23:23.355779 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:23 crc kubenswrapper[3014]: I0320 15:23:23.355969 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:23 crc kubenswrapper[3014]: I0320 15:23:23.356175 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:23:23 crc kubenswrapper[3014]: E0320 15:23:23.357907 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:23:23 crc kubenswrapper[3014]: I0320 15:23:23.499608 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:24 crc kubenswrapper[3014]: I0320 15:23:24.498615 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:25 crc kubenswrapper[3014]: I0320 15:23:25.499558 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.498958 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.500083 3014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.500199 3014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.500266 3014 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.500469 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.501888 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.501924 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.501939 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.504017 3014 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted"
Mar 20 15:23:26 crc kubenswrapper[3014]: I0320 15:23:26.504417 3014 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" containerID="cri-o://425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5" gracePeriod=30
Mar 20 15:23:26 crc kubenswrapper[3014]: E0320 15:23:26.581027 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.435957 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/6.log"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.436856 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/5.log"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.438174 3014 generic.go:334] "Generic (PLEG): container finished" podID="4faaac70bf21c7d77dcb526af466bffa" containerID="425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5" exitCode=255
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.438237 3014 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerDied","Data":"425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5"}
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.438358 3014 scope.go:117] "RemoveContainer" containerID="2a4bbbcf093620bce12d9546eca5a9ab9bb3efb28b2ffdeaab7fb00ce33542c2"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.438462 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.439522 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.439550 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.439563 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.440852 3014 scope.go:117] "RemoveContainer" containerID="425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5"
Mar 20 15:23:27 crc kubenswrapper[3014]: E0320 15:23:27.442459 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa"
Mar 20 15:23:27 crc kubenswrapper[3014]: I0320 15:23:27.499468 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:28 crc kubenswrapper[3014]: I0320 15:23:28.441589 3014 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_4faaac70bf21c7d77dcb526af466bffa/cluster-policy-controller/6.log"
Mar 20 15:23:28 crc kubenswrapper[3014]: I0320 15:23:28.499551 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:29 crc kubenswrapper[3014]: I0320 15:23:29.499568 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:30 crc kubenswrapper[3014]: E0320 15:23:30.188486 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:23:30 crc kubenswrapper[3014]: I0320 15:23:30.357984 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:30 crc kubenswrapper[3014]: I0320 15:23:30.359553 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:30 crc kubenswrapper[3014]: I0320 15:23:30.359602 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:30 crc kubenswrapper[3014]: I0320 15:23:30.359621 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:30 crc kubenswrapper[3014]: I0320 15:23:30.359654 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:23:30 crc kubenswrapper[3014]: E0320 15:23:30.360997 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:23:30 crc kubenswrapper[3014]: I0320 15:23:30.500146 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:30 crc kubenswrapper[3014]: E0320 15:23:30.802007 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:23:31 crc kubenswrapper[3014]: E0320 15:23:31.461846 3014 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" event="&Event{ObjectMeta:{crc.189e95c5ee4bd43f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:19:40.569596991 +0000 UTC m=+1.010597343,LastTimestamp:2026-03-20 15:19:40.569596991 +0000 UTC m=+1.010597343,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:23:31 crc kubenswrapper[3014]: I0320 15:23:31.498990 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.499630 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.736033 3014 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.736287 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.737809 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.737845 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.737857 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:32 crc kubenswrapper[3014]: I0320 15:23:32.739291 3014 scope.go:117] "RemoveContainer" containerID="425e912d50a15f456cb7feeec115350270ac14135611214347099abc656023a5"
Mar 20 15:23:32 crc kubenswrapper[3014]: E0320 15:23:32.739989 3014 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-policy-controller\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=cluster-policy-controller pod=kube-controller-manager-crc_openshift-kube-controller-manager(4faaac70bf21c7d77dcb526af466bffa)\"" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa"
Mar 20 15:23:33 crc kubenswrapper[3014]: I0320 15:23:33.499693 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:34 crc kubenswrapper[3014]: I0320 15:23:34.499761 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:35 crc kubenswrapper[3014]: I0320 15:23:35.499557 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:36 crc kubenswrapper[3014]: I0320 15:23:36.500086 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:37 crc kubenswrapper[3014]: E0320 15:23:37.190756 3014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" interval="7s"
Mar 20 15:23:37 crc kubenswrapper[3014]: I0320 15:23:37.361837 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:37 crc kubenswrapper[3014]: I0320 15:23:37.364950 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:37 crc kubenswrapper[3014]: I0320 15:23:37.365372 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:37 crc kubenswrapper[3014]: I0320 15:23:37.365387 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:37 crc kubenswrapper[3014]: I0320 15:23:37.365421 3014 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:23:37 crc kubenswrapper[3014]: E0320 15:23:37.367041 3014 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host" node="crc"
Mar 20 15:23:37 crc kubenswrapper[3014]: I0320 15:23:37.499521 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:38 crc kubenswrapper[3014]: I0320 15:23:38.498736 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:38 crc kubenswrapper[3014]: I0320 15:23:38.666549 3014 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:23:38 crc kubenswrapper[3014]: I0320 15:23:38.667912 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:23:38 crc kubenswrapper[3014]: I0320 15:23:38.667947 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:23:38 crc kubenswrapper[3014]: I0320 15:23:38.667956 3014 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:23:39 crc kubenswrapper[3014]: I0320 15:23:39.500115 3014 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp: lookup api-int.crc.testing on 199.204.44.24:53: no such host
Mar 20 15:23:39 crc kubenswrapper[3014]: I0320 15:23:39.945046 3014 reconstruct_new.go:210] "DevicePaths of reconstructed volumes updated"
Mar 20 15:23:40 crc kubenswrapper[3014]: I0320 15:23:40.500625 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 15:23:40 crc kubenswrapper[3014]: I0320 15:23:40.502180 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 15:23:40 crc kubenswrapper[3014]: I0320 15:23:40.502313 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 15:23:40 crc kubenswrapper[3014]: I0320 15:23:40.502485 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 15:23:40 crc kubenswrapper[3014]: I0320 15:23:40.502644 3014 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 15:23:40 crc kubenswrapper[3014]: E0320 15:23:40.802222 3014 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:23:42 crc systemd[1]: Stopping Kubernetes Kubelet...
Mar 20 15:23:42 crc systemd[1]: kubelet.service: Deactivated successfully.
Mar 20 15:23:42 crc systemd[1]: Stopped Kubernetes Kubelet.
Mar 20 15:23:42 crc systemd[1]: kubelet.service: Consumed 13.400s CPU time.
-- Boot 212dbf6c9c7c492dbcbdda587f0ba44e --
Mar 20 15:25:00 crc systemd[1]: Starting Kubernetes Kubelet...
Mar 20 15:25:00 crc kubenswrapper[3552]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Mar 20 15:25:00 crc kubenswrapper[3552]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Mar 20 15:25:00 crc kubenswrapper[3552]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Mar 20 15:25:00 crc kubenswrapper[3552]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Mar 20 15:25:00 crc kubenswrapper[3552]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Mar 20 15:25:00 crc kubenswrapper[3552]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.959223 3552 server.go:204] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962101 3552 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962136 3552 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962150 3552 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962163 3552 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962176 3552 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962190 3552 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962202 3552 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962216 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962228 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962240 3552 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962252 3552 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962263 3552 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962276 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962288 3552 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962300 3552 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962312 3552 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962324 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962336 3552 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962348 3552 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962360 3552 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962372 3552 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962385 3552 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962398 3552 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962435 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962456 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962468 3552 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962480 3552 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962492 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962504 3552 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962516 3552 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962528 3552 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962541 3552 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962552 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962565 3552 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962577 3552 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962589 3552 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962601 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962613 3552 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962627 3552 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962639 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962651 3552 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962663 3552 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962675 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962687 3552 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962699 3552 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962713 3552 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962725 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962737 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962749 3552 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962761 3552 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962772 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962784 3552 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962796 3552 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962808 3552 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962820 3552 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962867 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962882 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962894 3552 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962906 3552 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.962918 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967469 3552 flags.go:64] FLAG: --address="0.0.0.0"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967514 3552 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967529 3552 flags.go:64] FLAG: --anonymous-auth="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967541 3552 flags.go:64] FLAG: --application-metrics-count-limit="100"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967553 3552 flags.go:64] FLAG: --authentication-token-webhook="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967564 3552 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967576 3552 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967588 3552 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967598 3552 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967608 3552 flags.go:64] FLAG: --azure-container-registry-config=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967619 3552 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967631 3552 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967642 3552 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967653 3552 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967663 3552 flags.go:64] FLAG: --cgroup-root=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967672 3552 flags.go:64] FLAG: --cgroups-per-qos="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967682 3552 flags.go:64] FLAG: --client-ca-file=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967692 3552 flags.go:64] FLAG: --cloud-config=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967701 3552 flags.go:64] FLAG: --cloud-provider=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967710 3552 flags.go:64] FLAG: --cluster-dns="[]"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967723 3552 flags.go:64] FLAG: --cluster-domain=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967733 3552 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967744 3552 flags.go:64] FLAG: --config-dir=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967754 3552 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967765 3552 flags.go:64] FLAG: --container-log-max-files="5"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967778 3552 flags.go:64] FLAG: --container-log-max-size="10Mi"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967788 3552 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967798 3552 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967809 3552 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967819 3552 flags.go:64] FLAG: --contention-profiling="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967829 3552 flags.go:64] FLAG: --cpu-cfs-quota="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967839 3552 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967849 3552 flags.go:64] FLAG: --cpu-manager-policy="none"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967859 3552 flags.go:64] FLAG: --cpu-manager-policy-options=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967876 3552 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967886 3552 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967897 3552 flags.go:64] FLAG: --enable-debugging-handlers="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967907 3552 flags.go:64] FLAG: --enable-load-reader="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967917 3552 flags.go:64] FLAG: --enable-server="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967927 3552 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967938 3552 flags.go:64] FLAG: --event-burst="100"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967949 3552 flags.go:64] FLAG: --event-qps="50"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967959 3552 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967969 3552 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967979 3552 flags.go:64] FLAG: --eviction-hard=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.967991 3552 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968001 3552 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968011 3552 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968021 3552 flags.go:64] FLAG: --eviction-soft=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968031 3552 flags.go:64] FLAG: --eviction-soft-grace-period=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968041 3552 flags.go:64] FLAG: --exit-on-lock-contention="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968050 3552 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968061 3552 flags.go:64] FLAG: --experimental-mounter-path=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968071 3552 flags.go:64] FLAG: --fail-swap-on="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968081 3552 flags.go:64] FLAG: --feature-gates=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968092 3552 flags.go:64] FLAG: --file-check-frequency="20s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968102 3552 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968112 3552 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968129 3552 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968139 3552 flags.go:64] FLAG: --healthz-port="10248"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968149 3552 flags.go:64] FLAG: --help="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968158 3552 flags.go:64] FLAG: --hostname-override=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968168 3552 flags.go:64] FLAG: --housekeeping-interval="10s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968178 3552 flags.go:64] FLAG: --http-check-frequency="20s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968188 3552 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968198 3552 flags.go:64] FLAG: --image-credential-provider-config=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968207 3552 flags.go:64] FLAG: --image-gc-high-threshold="85"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968216 3552 flags.go:64] FLAG: --image-gc-low-threshold="80"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968226 3552 flags.go:64] FLAG: --image-service-endpoint=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968236 3552 flags.go:64] FLAG: --iptables-drop-bit="15"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968245 3552 flags.go:64] FLAG: --iptables-masquerade-bit="14"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968255 3552 flags.go:64] FLAG: --keep-terminated-pod-volumes="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968265 3552 flags.go:64] FLAG: --kernel-memcg-notification="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968275 3552 flags.go:64] FLAG: --kube-api-burst="100"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968285 3552 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968296 3552 flags.go:64] FLAG: --kube-api-qps="50"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968305 3552 flags.go:64] FLAG: --kube-reserved=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968315 3552 flags.go:64] FLAG: --kube-reserved-cgroup=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968326 3552 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968337 3552 flags.go:64] FLAG: --kubelet-cgroups=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968346 3552 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968356 3552 flags.go:64] FLAG: --lock-file=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968365 3552 flags.go:64] FLAG: --log-cadvisor-usage="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968375 3552 flags.go:64] FLAG: --log-flush-frequency="5s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968385 3552 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968399 3552 flags.go:64] FLAG: --log-json-split-stream="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968437 3552 flags.go:64] FLAG: --logging-format="text"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968447 3552 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968458 3552 flags.go:64] FLAG: --make-iptables-util-chains="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968468 3552 flags.go:64] FLAG: --manifest-url=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968481 3552 flags.go:64] FLAG: --manifest-url-header=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968493 3552 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968503 3552 flags.go:64] FLAG: --max-open-files="1000000"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968515 3552 flags.go:64] FLAG: --max-pods="110"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968525 3552 flags.go:64] FLAG: --maximum-dead-containers="-1"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968535 3552 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968545 3552 flags.go:64] FLAG: --memory-manager-policy="None"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968555 3552 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968565 3552 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968575 3552 flags.go:64] FLAG: --node-ip="192.168.126.11"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968585 3552 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968607 3552 flags.go:64] FLAG: --node-status-max-images="50"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968617 3552 flags.go:64] FLAG: --node-status-update-frequency="10s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968626 3552 flags.go:64] FLAG: --oom-score-adj="-999"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968636 3552 flags.go:64] FLAG: --pod-cidr=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968646 3552 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ce0319702e115e7248d135e58342ccf3f458e19c39e86dc8e79036f578ce80a4"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968672 3552 flags.go:64] FLAG: --pod-manifest-path=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968681 3552 flags.go:64] FLAG: --pod-max-pids="-1"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968692 3552 flags.go:64] FLAG: --pods-per-core="0"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968702 3552 flags.go:64] FLAG: --port="10250"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968712 3552 flags.go:64] FLAG: --protect-kernel-defaults="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968722 3552 flags.go:64] FLAG: --provider-id=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968733 3552 flags.go:64] FLAG: --qos-reserved=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968745 3552 flags.go:64] FLAG: --read-only-port="10255"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968759 3552 flags.go:64] FLAG: --register-node="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968772 3552 flags.go:64] FLAG: --register-schedulable="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968784 3552 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968806 3552 flags.go:64] FLAG: --registry-burst="10"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968818 3552 flags.go:64] FLAG: --registry-qps="5"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968860 3552 flags.go:64] FLAG: --reserved-cpus=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968875 3552 flags.go:64] FLAG: --reserved-memory=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968887 3552 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968897 3552 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968911 3552 flags.go:64] FLAG: --rotate-certificates="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968920 3552 flags.go:64] FLAG: --rotate-server-certificates="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968931 3552 flags.go:64] FLAG: --runonce="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968942 3552 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968953 3552 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968963 3552 flags.go:64] FLAG: --seccomp-default="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968974 3552 flags.go:64] FLAG: --serialize-image-pulls="true"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968984 3552 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.968994 3552 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969005 3552 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969015 3552 flags.go:64] FLAG: --storage-driver-password="root"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969024 3552 flags.go:64] FLAG: --storage-driver-secure="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969035 3552 flags.go:64] FLAG: --storage-driver-table="stats"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969044 3552 flags.go:64] FLAG: --storage-driver-user="root"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969054 3552 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969064 3552 flags.go:64] FLAG: --sync-frequency="1m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969073 3552 flags.go:64] FLAG: --system-cgroups=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969083 3552 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969099 3552 flags.go:64] FLAG: --system-reserved-cgroup=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969109 3552 flags.go:64] FLAG: --tls-cert-file=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969118 3552 flags.go:64] FLAG: --tls-cipher-suites="[]"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969132 3552 flags.go:64] FLAG: --tls-min-version=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969142 3552 flags.go:64] FLAG: --tls-private-key-file=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969151 3552 flags.go:64] FLAG: --topology-manager-policy="none"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969161 3552 flags.go:64] FLAG: --topology-manager-policy-options=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969170 3552 flags.go:64] FLAG: --topology-manager-scope="container"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969180 3552 flags.go:64] FLAG: --v="2"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969192 3552 flags.go:64] FLAG: --version="false"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969204 3552 flags.go:64] FLAG: --vmodule=""
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969215 3552 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.969226 3552 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969347 3552 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969365 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969378 3552 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969391 3552 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969433 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969445 3552 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969457 3552 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969469 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969481 3552 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969493 3552 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969505 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969517 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969528 3552 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969540 3552 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969552 3552 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969563 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969574 3552 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969586 3552 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969599 3552 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969611 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969622 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969634 3552 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969645 3552 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969657 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969671 3552 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969682 3552 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969693 3552 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969704 3552 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969715 3552 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969726 3552 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969737 3552 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969748 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969759 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969772 3552 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969783 3552 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969794 3552 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969805 3552 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969815 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969826 3552 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969837 3552 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969847 3552 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969858 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969868 3552 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969879 3552 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969890 3552 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969900 3552 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969911 3552 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969921 3552 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969932 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969943 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969953 3552 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969964 3552 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969974 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969985 3552 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.969996 3552 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.970006 3552 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.970017 3552 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.970027 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.970038 3552 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.970048 3552 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.970060 3552 feature_gate.go:250] feature gates: &{map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false ServiceAccountTokenNodeBindingValidation:false ServiceAccountTokenPodNodeInfo:false TranslateStreamCloseWebsocketRequests:false ValidatingAdmissionPolicy:false]}
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.985121 3552 server.go:487] "Kubelet version" kubeletVersion="v1.29.5+29c95f3"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.985168 3552 server.go:489] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985239 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985253 3552 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985265 3552 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985276 3552 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985286 3552 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985297 3552 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985308 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985319 3552 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985330 3552 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985341 3552 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985352 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985365 3552 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985376 3552 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985388 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985398 3552 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985443 3552 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985456 3552 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985468 3552 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985479 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985489 3552 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985501 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985512 3552 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985523 3552 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985535 3552 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985545 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985557 3552 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985568 3552 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985579 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985590 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985600 3552 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985611 3552 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985622 3552 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985633 3552 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985643 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985654 3552 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985665 3552 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985675 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985686 3552 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985697 3552 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985707 3552 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985718 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985728 3552 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985739 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985750 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985762 3552 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985773 3552 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985784 3552 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985796 3552 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985806 3552 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985817 3552 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985828 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985838 3552 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985849 3552 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985860 3552 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985871 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985881 3552 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985892 3552 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985902 3552 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985913 3552 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.985924 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.985936 3552 feature_gate.go:250] feature gates: &{map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false ServiceAccountTokenNodeBindingValidation:false ServiceAccountTokenPodNodeInfo:false TranslateStreamCloseWebsocketRequests:false ValidatingAdmissionPolicy:false]}
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986067 3552 feature_gate.go:227] unrecognized feature gate: ChunkSizeMiB
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986080 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986091 3552 feature_gate.go:227] unrecognized feature gate: GCPClusterHostedDNS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986102 3552 feature_gate.go:227] unrecognized feature gate: NewOLM
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986113 3552 feature_gate.go:227] unrecognized feature gate: AzureWorkloadIdentity
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986123 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfigAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986134 3552 feature_gate.go:227] unrecognized feature gate: NetworkLiveMigration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986145 3552 feature_gate.go:227] unrecognized feature gate: BuildCSIVolumes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986156 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallVSphere
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986166 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallIBMCloud
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986177 3552 feature_gate.go:227] unrecognized feature gate: DNSNameResolver
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986187 3552 feature_gate.go:227] unrecognized feature gate: ExternalRouteCertificate
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986198 3552 feature_gate.go:227] unrecognized feature gate: GCPLabelsTags
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986209 3552 feature_gate.go:227] unrecognized feature gate: BareMetalLoadBalancer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986220 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstall
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986231 3552 feature_gate.go:227] unrecognized feature gate: NodeDisruptionPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986242 3552 feature_gate.go:227] unrecognized feature gate: PrivateHostedZoneAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986253 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProvider
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986263 3552 feature_gate.go:227] unrecognized feature gate: MachineConfigNodes
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986274 3552 feature_gate.go:227] unrecognized feature gate: GatewayAPI
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986284 3552 feature_gate.go:227] unrecognized feature gate: HardwareSpeed
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986295 3552 feature_gate.go:227] unrecognized feature gate: InsightsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986306 3552 feature_gate.go:227] unrecognized feature gate: VSphereDriverConfiguration
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986316 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986326 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderAzure
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986337 3552 feature_gate.go:227] unrecognized feature gate: OnClusterBuild
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986348 3552 feature_gate.go:240] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986359 3552 feature_gate.go:227] unrecognized feature gate: MetricsServer
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986371 3552 feature_gate.go:227] unrecognized feature gate: VSphereMultiVCenters
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986381 3552 feature_gate.go:227] unrecognized feature gate: CSIDriverSharedResource
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986392 3552 feature_gate.go:227] unrecognized feature gate: Example
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986424 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986435 3552 feature_gate.go:227] unrecognized feature gate: InsightsOnDemandDataGather
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986446 3552 feature_gate.go:227] unrecognized feature gate: OpenShiftPodSecurityAdmission
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986457 3552 feature_gate.go:227] unrecognized feature gate: SigstoreImageVerification
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986468 3552 feature_gate.go:227] unrecognized feature gate: ExternalCloudProviderExternal
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986479 3552 feature_gate.go:227] unrecognized feature gate: SignatureStores
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986490 3552 feature_gate.go:227] unrecognized feature gate: UpgradeStatus
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986501 3552 feature_gate.go:227] unrecognized feature gate: VSphereStaticIPs
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986512 3552 feature_gate.go:227] unrecognized feature gate: AlibabaPlatform
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986522 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986532 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986543 3552 feature_gate.go:227] unrecognized feature gate: ManagedBootImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986554 3552 feature_gate.go:227] unrecognized feature gate: MetricsCollectionProfiles
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986565 3552 feature_gate.go:227] unrecognized feature gate: NetworkDiagnosticsConfig
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986576 3552 feature_gate.go:227] unrecognized feature gate: AdminNetworkPolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986586 3552 feature_gate.go:227] unrecognized feature gate: EtcdBackendQuota
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986597 3552 feature_gate.go:227] unrecognized feature gate: MachineAPIProviderOpenStack
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986608 3552 feature_gate.go:227] unrecognized feature gate: PinnedImages
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986619 3552 feature_gate.go:227] unrecognized feature gate: PlatformOperators
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986630 3552 feature_gate.go:227] unrecognized feature gate: VolumeGroupSnapshot
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986640 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallNutanix
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986652 3552 feature_gate.go:227] unrecognized feature gate: ImagePolicy
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986662 3552 feature_gate.go:227] unrecognized feature gate: VSphereControlPlaneMachineSet
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986673 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallGCP
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986684 3552 feature_gate.go:227] unrecognized feature gate: ExternalOIDC
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986694 3552 feature_gate.go:227] unrecognized feature gate: InstallAlternateInfrastructureAWS
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986705 3552 feature_gate.go:227] unrecognized feature gate: MixedCPUsAllocation
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986715 3552 feature_gate.go:227] unrecognized feature gate: AutomatedEtcdBackup
Mar 20 15:25:00 crc kubenswrapper[3552]: W0320 15:25:00.986726 3552 feature_gate.go:227] unrecognized feature gate: ClusterAPIInstallPowerVS
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.986737 3552 feature_gate.go:250] feature gates: &{map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false ServiceAccountTokenNodeBindingValidation:false ServiceAccountTokenPodNodeInfo:false TranslateStreamCloseWebsocketRequests:false ValidatingAdmissionPolicy:false]}
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.988122 3552 server.go:925] "Client rotation is on, will bootstrap in background"
Mar 20 15:25:00 crc kubenswrapper[3552]: I0320 15:25:00.999514 3552 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.002545 3552 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.003786 3552 server.go:982] "Starting client certificate rotation"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.003831 3552 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.005516 3552 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-06-13 15:12:41.140312073 +0000 UTC
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.005683 3552 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 2039h47m40.134634615s for next certificate rotation
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.039628 3552 dynamic_cafile_content.go:119] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.043122 3552 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.047306 3552 util_unix.go:103] "Using this endpoint is deprecated, please consider using full URL format" endpoint="/var/run/crio/crio.sock" URL="unix:///var/run/crio/crio.sock"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.080004 3552 remote_runtime.go:143] "Validated CRI v1 runtime API"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.080068 3552 util_unix.go:103] "Using this endpoint is deprecated, please consider using full URL format" endpoint="/var/run/crio/crio.sock" URL="unix:///var/run/crio/crio.sock"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.175554 3552 remote_image.go:111] "Validated CRI v1 image API"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.190980 3552 fs.go:132] Filesystem UUIDs: map[2026-03-20-15-18-37-00:/dev/sr0 68d6f3e9-64e9-44a4-a1d0-311f9c629a01:/dev/vda4 6ea7ef63-bc43-49c4-9337-b3b14ffb2763:/dev/vda3 7B77-95E7:/dev/vda2]
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.191034 3552 fs.go:133] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.226938 3552 manager.go:217] Machine: {Timestamp:2026-03-20 15:25:01.221694778 +0000 UTC m=+0.915391698 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:c1bd596843fb445da20eca66471ddf66 SystemUUID:071e7dfc-e1e7-4107-84a5-e8044dfe14fc BootID:212dbf6c-9c7c-492d-bcbd-da587f0ba44e Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85294297088 Type:vfs Inodes:41680320 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:80:41:cc Speed:0 Mtu:1500} {Name:br-int MacAddress:4e:ec:11:72:80:3b Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:80:41:cc Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:aa:1f:db Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:21:b4:20 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:40:21:e4 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:9a:58:5f Speed:-1 Mtu:1496} {Name:eth10 MacAddress:da:88:2b:da:9b:2a Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:b6:dc:d9:26:03:d4 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:92:0a:af:62:d0:8d Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.227570 3552 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.227859 3552 manager.go:233] Version: {KernelVersion:5.14.0-427.22.1.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 416.94.202406172220-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.231725 3552 container_manager_linux.go:268] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.232081 3552 container_manager_linux.go:273] "Creating Container Manager object based on Node Config" nodeConfig={"RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null}
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.232142 3552 topology_manager.go:138] "Creating topology manager with none policy"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.232165 3552 container_manager_linux.go:304] "Creating device plugin manager"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.233484 3552 manager.go:136] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.234518 3552 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.236509 3552 state_mem.go:36] "Initialized new in-memory state store"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.236677 3552 server.go:1227] "Using root directory" path="/var/lib/kubelet"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.242996 3552 kubelet.go:406] "Attempting to sync node with API server"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.243048 3552 kubelet.go:311] "Adding static pod path" path="/etc/kubernetes/manifests"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.243137 3552 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.243161 3552 kubelet.go:322] "Adding apiserver pod source"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.243239 3552 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.248832 3552 kuberuntime_manager.go:258] "Container runtime initialized" containerRuntime="cri-o" version="1.29.5-5.rhaos4.16.git7032128.el9" apiVersion="v1"
Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.253249 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.253366 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.253391 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.253639 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.254961 3552 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.257889 3552 kubelet.go:826] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259192 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/azure-file"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259233 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259247 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/rbd"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259265 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259277 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259295 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259307 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259318 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259332 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259342 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/cephfs"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259359 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259370 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259383 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259399 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.259432 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.260445 3552 plugins.go:642] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.260990 3552 server.go:1262] "Started kubelet"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.261203 3552 server.go:162] "Starting to listen" address="0.0.0.0" port=10250
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.261337 3552 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.262228 3552 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc systemd[1]: Started Kubernetes Kubelet.
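Note: the plugins.go:642 lines above enumerate the in-tree volume plugins the kubelet registers at startup, each under a unique "kubernetes.io/..." name. A toy Go sketch of such a name-keyed registry with duplicate detection follows (the types and method names are invented for illustration and are not the kubelet's actual VolumePluginMgr API):

package main

import "fmt"

// VolumePlugin is the minimal surface a plugin exposes to the registry.
type VolumePlugin interface{ Name() string }

type hostPath struct{}

func (hostPath) Name() string { return "kubernetes.io/host-path" }

// registry maps plugin names to plugins; duplicate names are rejected,
// since each "Loaded volume plugin" line must be unique.
type registry map[string]VolumePlugin

func (r registry) load(p VolumePlugin) error {
	if _, dup := r[p.Name()]; dup {
		return fmt.Errorf("plugin %q already registered", p.Name())
	}
	r[p.Name()] = p
	fmt.Printf("Loaded volume plugin %q\n", p.Name())
	return nil
}

func main() {
	r := registry{}
	_ = r.load(hostPath{})
	fmt.Println(r.load(hostPath{})) // second registration fails
}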
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265144 3552 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265215 3552 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265629 3552 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-06-27 13:05:20 +0000 UTC, rotation deadline is 2026-06-02 12:51:37.118933074 +0000 UTC
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265699 3552 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1773h26m35.853239932s for next certificate rotation
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265887 3552 volume_manager.go:289] "The desired_state_of_world populator starts"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265900 3552 server.go:233] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.265922 3552 volume_manager.go:291] "Starting Kubelet Volume Manager"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.266784 3552 server.go:461] "Adding debug handlers to kubelet server"
Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.267355 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.267543 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.267752 3552 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.200:6443: connect: connection refused" interval="200ms"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.267964 3552 desired_state_of_world_populator.go:151] "Desired state populator starts to run"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.269348 3552 factory.go:55] Registering systemd factory
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.269371 3552 factory.go:221] Registration of the systemd container factory successfully
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.272516 3552 factory.go:153] Registering CRI-O factory
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.272566 3552 factory.go:221] Registration of the crio container factory successfully
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.272744 3552 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.272798 3552 factory.go:103] Registering Raw factory
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.272832 3552 manager.go:1196] Started watching for new ooms in manager
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.274364 3552 manager.go:319] Starting recovery of all containers
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.274817 3552 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.200:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.189e961098fd82ee default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:25:01.260948206 +0000 UTC m=+0.954645076,LastTimestamp:2026-03-20 15:25:01.260948206 +0000 UTC m=+0.954645076,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.302116 3552 manager.go:324] Recovery completed
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.310892 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.314491 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.314685 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.314703 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.315565 3552 cpu_manager.go:215] "Starting CPU manager" policy="none"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.315594 3552 cpu_manager.go:216] "Reconciling" reconcilePeriod="10s"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.315609 3552 state_mem.go:36] "Initialized new in-memory state store"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.317761 3552 policy_none.go:49] "None policy: Start"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.318529 3552 memory_manager.go:170] "Starting memorymanager" policy="None"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.318560 3552 state_mem.go:35] "Initializing new in-memory state store"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365315 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c085412c-b875-46c9-ae3e-e6b0d8067091" volumeName="kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365351 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365371 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365385 3552 reconstruct_new.go:135] "Volume is marked as
uncertain and added into the actual state" pod="" podName="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" volumeName="kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365414 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" volumeName="kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365430 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365445 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365459 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="13045510-8717-4a71-ade4-be95a76440a7" volumeName="kubernetes.io/projected/13045510-8717-4a71-ade4-be95a76440a7-kube-api-access-dtjml" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365474 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365489 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365503 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6639609b-906b-4193-883e-ed1160aa5d50" volumeName="kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365517 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" volumeName="kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365534 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" volumeName="kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365547 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365563 3552 reconstruct_new.go:135] "Volume 
is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365577 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365594 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365609 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" volumeName="kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365624 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="475321a1-8b7e-4033-8f72-b05a8b377347" volumeName="kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-cni-binary-copy" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365638 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365655 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365670 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d0f40333-c860-4c04-8058-a0bf572dcf12" volumeName="kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365685 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="10603adc-d495-423c-9459-4caa405960bb" volumeName="kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365700 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3f4dca86-e6ee-4ec9-8324-86aff960225e" volumeName="kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365716 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 
15:25:01.365754 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365770 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365791 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365811 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc291782-27d2-4a74-af79-c7dcb31535d2" volumeName="kubernetes.io/secret/cc291782-27d2-4a74-af79-c7dcb31535d2-metrics-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365825 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/projected/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-kube-api-access-4qr9t" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365838 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365854 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" volumeName="kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365868 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365881 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" volumeName="kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365896 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" volumeName="kubernetes.io/projected/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-kube-api-access-rkkfv" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365910 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca" seLinuxMountContext="" Mar 20 15:25:01 crc 
kubenswrapper[3552]: I0320 15:25:01.365925 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365938 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="8a5ae51d-d173-4531-8975-f164c975ce1f" volumeName="kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365951 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ed024e5d-8fc2-4c22-803d-73f3c9795f19" volumeName="kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365966 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" volumeName="kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365978 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.365992 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="8a5ae51d-d173-4531-8975-f164c975ce1f" volumeName="kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366006 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3f4dca86-e6ee-4ec9-8324-86aff960225e" volumeName="kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366021 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366035 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0f394926-bdb9-425c-b36e-264d7fd34550" volumeName="kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366049 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366063 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 
15:25:01.366075 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366094 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c782cf62-a827-4677-b3c2-6f82c5f09cbb" volumeName="kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366111 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e" volumeName="kubernetes.io/projected/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-kube-api-access-d7jw8" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366134 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="297ab9b6-2186-4d5b-a952-2bfd59af63c4" volumeName="kubernetes.io/configmap/297ab9b6-2186-4d5b-a952-2bfd59af63c4-mcc-auth-proxy-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366149 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366164 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366177 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366193 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6639609b-906b-4193-883e-ed1160aa5d50" volumeName="kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366206 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c782cf62-a827-4677-b3c2-6f82c5f09cbb" volumeName="kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366223 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="10603adc-d495-423c-9459-4caa405960bb" volumeName="kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366241 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca" 
seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366257 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-stats-auth" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366272 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366286 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf1a8b70-3856-486f-9912-a2de1d57c3fb" volumeName="kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-node-bootstrap-token" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366300 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-env-overrides" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366314 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.366595 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.368773 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.368832 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.368851 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.368893 3552 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.370090 3552 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.200:6443: connect: connection refused" node="crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372290 3552 reconstruct_new.go:149] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ea5f9a7192af1960ec8c50a86fd2d9a756dbf85695798868f611e04a03ec009/globalmount" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372350 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-auth-proxy-config" seLinuxMountContext="" Mar 20 15:25:01 crc 
kubenswrapper[3552]: I0320 15:25:01.372370 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372386 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372418 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" volumeName="kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372435 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372450 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372473 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d0dcce3-d96e-48cb-9b9f-362105911589" volumeName="kubernetes.io/configmap/9d0dcce3-d96e-48cb-9b9f-362105911589-mcd-auth-proxy-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372489 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372505 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" volumeName="kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372521 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372538 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" volumeName="kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372553 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d0dcce3-d96e-48cb-9b9f-362105911589" volumeName="kubernetes.io/projected/9d0dcce3-d96e-48cb-9b9f-362105911589-kube-api-access-xkzjk" 
seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372570 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372587 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372603 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372622 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" volumeName="kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372636 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372649 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372662 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf1a8b70-3856-486f-9912-a2de1d57c3fb" volumeName="kubernetes.io/projected/bf1a8b70-3856-486f-9912-a2de1d57c3fb-kube-api-access-6z2n9" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372675 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="12e733dd-0939-4f1b-9cbb-13897e093787" volumeName="kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372688 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372702 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372716 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ed024e5d-8fc2-4c22-803d-73f3c9795f19" 
volumeName="kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372732 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4092a9f8-5acc-4932-9e90-ef962eeb301a" volumeName="kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372748 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="71af81a9-7d43-49b2-9287-c375900aa905" volumeName="kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372765 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9fb762d1-812f-43f1-9eac-68034c1ecec7" volumeName="kubernetes.io/projected/9fb762d1-812f-43f1-9eac-68034c1ecec7-kube-api-access" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372779 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372795 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372814 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4092a9f8-5acc-4932-9e90-ef962eeb301a" volumeName="kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372829 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372844 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/projected/51a02bbf-2d40-4f84-868a-d399ea18a846-kube-api-access-zjg2w" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372862 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372876 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="13045510-8717-4a71-ade4-be95a76440a7" volumeName="kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372900 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" 
volumeName="kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372918 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372935 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372951 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-bound-sa-token" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372968 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" volumeName="kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.372985 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="59748b9b-c309-4712-aa85-bb38d71c4915" volumeName="kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373003 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="cf1a8966-f594-490a-9fbb-eec5bafd13d3" volumeName="kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373018 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373034 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" volumeName="kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-binary-copy" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373057 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-default-certificate" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373073 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="475321a1-8b7e-4033-8f72-b05a8b377347" volumeName="kubernetes.io/projected/475321a1-8b7e-4033-8f72-b05a8b377347-kube-api-access-c2f8t" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373088 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" 
volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373103 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/secret/410cf605-1970-4691-9c95-53fdc123b1f3-ovn-control-plane-metrics-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373117 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6d67253e-2acd-4bc1-8185-793587da4f17" volumeName="kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373131 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c782cf62-a827-4677-b3c2-6f82c5f09cbb" volumeName="kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373144 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373160 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-ovnkube-identity-cm" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373175 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373189 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373203 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c085412c-b875-46c9-ae3e-e6b0d8067091" volumeName="kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373216 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ed024e5d-8fc2-4c22-803d-73f3c9795f19" volumeName="kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373230 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373243 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7d51f445-054a-4e4f-a67b-a828f5a32511" volumeName="kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-bound-sa-token" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373256 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c085412c-b875-46c9-ae3e-e6b0d8067091" volumeName="kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373272 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373286 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-env-overrides" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373301 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" volumeName="kubernetes.io/projected/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-kube-api-access-bwbqm" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373315 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373330 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" volumeName="kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373344 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="13045510-8717-4a71-ade4-be95a76440a7" volumeName="kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373366 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="887d596e-c519-4bfa-af90-3edd9e1b2f0f" volumeName="kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373380 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" volumeName="kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373394 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373427 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373441 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/configmap/aa90b3c2-febd-4588-a063-7fbbe82f00c1-service-ca-bundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373454 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/projected/aa90b3c2-febd-4588-a063-7fbbe82f00c1-kube-api-access-v45vm" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373469 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" volumeName="kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373483 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d3992789-6f8b-4806-8ce0-261a7623ca46" volumeName="kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373498 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373512 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373526 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373540 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="71af81a9-7d43-49b2-9287-c375900aa905" volumeName="kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373571 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373586 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0f394926-bdb9-425c-b36e-264d7fd34550" volumeName="kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373599 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6d67253e-2acd-4bc1-8185-793587da4f17" volumeName="kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373614 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373630 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6639609b-906b-4193-883e-ed1160aa5d50" volumeName="kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373643 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" volumeName="kubernetes.io/projected/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-kube-api-access-8svnk" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373658 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="297ab9b6-2186-4d5b-a952-2bfd59af63c4" volumeName="kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373672 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="530553aa-0a1d-423e-8a22-f5eb4bdbb883" volumeName="kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373686 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="71af81a9-7d43-49b2-9287-c375900aa905" volumeName="kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373700 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="2b6d14a5-ca00-40c7-af7a-051a98a24eed" volumeName="kubernetes.io/configmap/2b6d14a5-ca00-40c7-af7a-051a98a24eed-iptables-alerter-script" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373742 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/projected/410cf605-1970-4691-9c95-53fdc123b1f3-kube-api-access-cx4f9" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373759 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373773 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373788 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="2b6d14a5-ca00-40c7-af7a-051a98a24eed" volumeName="kubernetes.io/projected/2b6d14a5-ca00-40c7-af7a-051a98a24eed-kube-api-access-j4qn7" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373803 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" volumeName="kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373818 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3f4dca86-e6ee-4ec9-8324-86aff960225e" volumeName="kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373832 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" volumeName="kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-sysctl-allowlist" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373847 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="aa90b3c2-febd-4588-a063-7fbbe82f00c1" volumeName="kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-metrics-certs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373861 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc291782-27d2-4a74-af79-c7dcb31535d2" volumeName="kubernetes.io/projected/cc291782-27d2-4a74-af79-c7dcb31535d2-kube-api-access-4sfhc" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373877 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-auth-proxy-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373892 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="34a48baf-1bee-4921-8bb2-9b7320e76f79" volumeName="kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373906 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373922 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373937 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" volumeName="kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373951 3552 reconstruct_new.go:135] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="51a02bbf-2d40-4f84-868a-d399ea18a846" volumeName="kubernetes.io/secret/51a02bbf-2d40-4f84-868a-d399ea18a846-webhook-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373966 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" volumeName="kubernetes.io/secret/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-machine-approver-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373982 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.373996 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" volumeName="kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374012 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="530553aa-0a1d-423e-8a22-f5eb4bdbb883" volumeName="kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374026 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a23c0ee-5648-448c-b772-83dced2891ce" volumeName="kubernetes.io/projected/6a23c0ee-5648-448c-b772-83dced2891ce-kube-api-access-gsxd9" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374041 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5c38ff-1fa8-4219-994d-15776acd4a4d" volumeName="kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374054 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3e19f9e8-9a37-4ca8-9790-c219750ab482" volumeName="kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374068 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf1a8b70-3856-486f-9912-a2de1d57c3fb" volumeName="kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-certs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374083 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374097 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" volumeName="kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374112 3552 reconstruct_new.go:135] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="59748b9b-c309-4712-aa85-bb38d71c4915" volumeName="kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374127 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9fb762d1-812f-43f1-9eac-68034c1ecec7" volumeName="kubernetes.io/configmap/9fb762d1-812f-43f1-9eac-68034c1ecec7-service-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374142 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd556935-a077-45df-ba3f-d42c39326ccd" volumeName="kubernetes.io/empty-dir/bd556935-a077-45df-ba3f-d42c39326ccd-tmpfs" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374158 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="410cf605-1970-4691-9c95-53fdc123b1f3" volumeName="kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-ovnkube-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374172 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374187 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9fb762d1-812f-43f1-9eac-68034c1ecec7" volumeName="kubernetes.io/secret/9fb762d1-812f-43f1-9eac-68034c1ecec7-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374203 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" volumeName="kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374219 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" volumeName="kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374234 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b5d722a-1123-4935-9740-52a08d018bc9" volumeName="kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374249 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d0dcce3-d96e-48cb-9b9f-362105911589" volumeName="kubernetes.io/secret/9d0dcce3-d96e-48cb-9b9f-362105911589-proxy-tls" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374264 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="120b38dc-8236-4fa6-a452-642b8ad738ee" volumeName="kubernetes.io/projected/120b38dc-8236-4fa6-a452-642b8ad738ee-kube-api-access-bwvjb" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374278 3552 reconstruct_new.go:135] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" volumeName="kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374292 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e" volumeName="kubernetes.io/configmap/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-serviceca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374307 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="297ab9b6-2186-4d5b-a952-2bfd59af63c4" volumeName="kubernetes.io/projected/297ab9b6-2186-4d5b-a952-2bfd59af63c4-kube-api-access-vtgqn" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374322 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" volumeName="kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374337 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5cad292d-912c-4787-a5fa-0ade98e731eb" volumeName="kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374352 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" volumeName="kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374365 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="887d596e-c519-4bfa-af90-3edd9e1b2f0f" volumeName="kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374378 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="8a5ae51d-d173-4531-8975-f164c975ce1f" volumeName="kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374418 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="4092a9f8-5acc-4932-9e90-ef962eeb301a" volumeName="kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374436 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374452 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="b54e8941-2fc4-432a-9e51-39684df9089e" volumeName="kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-kube-api-access-9x6dp" seLinuxMountContext="" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374469 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="6268b7fe-8910-4505-b404-6f1df638105c" volumeName="kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374486 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" volumeName="kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374502 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" volumeName="kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374519 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="5bacb25d-97b6-4491-8fb4-99feae1d802a" volumeName="kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374534 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="bab054c9-6c83-40ee-896d-6459b22a6b4b" volumeName="kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374550 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="0f394926-bdb9-425c-b36e-264d7fd34550" volumeName="kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374568 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="530553aa-0a1d-423e-8a22-f5eb4bdbb883" volumeName="kubernetes.io/empty-dir/530553aa-0a1d-423e-8a22-f5eb4bdbb883-available-featuregates" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374584 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="6d67253e-2acd-4bc1-8185-793587da4f17" volumeName="kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374600 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="887d596e-c519-4bfa-af90-3edd9e1b2f0f" volumeName="kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374618 3552 reconstruct_new.go:135] "Volume is marked as uncertain and added into the actual state" pod="" podName="475321a1-8b7e-4033-8f72-b05a8b377347" volumeName="kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-multus-daemon-config" seLinuxMountContext=""
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374629 3552 reconstruct_new.go:102] "Volume reconstruction finished"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.374640 3552 reconciler_new.go:29] "Reconciler: start to sync state"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.385556 3552 manager.go:296] "Starting Device Plugin manager"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.385616 3552 manager.go:479] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.385627 3552 server.go:79] "Starting device plugin registration server"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.386110 3552 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.386215 3552 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.386235 3552 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.426540 3552 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.429024 3552 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.429103 3552 status_manager.go:217] "Starting to sync pod status with apiserver"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.429139 3552 kubelet.go:2343] "Starting kubelet main sync loop"
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.429209 3552 kubelet.go:2367] "Skipping pod synchronization" err="PLEG is not healthy: pleg has yet to be successful"
Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.430464 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.430530 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.469748 3552 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.200:6443: connect: connection refused" interval="400ms"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.529653 3552 kubelet.go:2429] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.529712 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d3ae206906481b4831fd849b559269c8" podNamespace="openshift-machine-config-operator" podName="kube-rbac-proxy-crio-crc"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.529762 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.531108 3552 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532489 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532526 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532538 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532634 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b2a6a3b2ca08062d24afa4c01aaf9e4f" podNamespace="openshift-etcd" podName="etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532673 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532749 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.532776 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533624 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533668 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533691 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533701 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533668 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533777 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533923 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.533946 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.534041 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" podNamespace="openshift-kube-apiserver" podName="kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.534191 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.535530 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.535591 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.535607 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536091 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536119 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536128 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536277 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4faaac70bf21c7d77dcb526af466bffa" podNamespace="openshift-kube-controller-manager" podName="kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536306 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536595 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.536749 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.537743 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.537767 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.537778 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.537852 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6a57a7fb1944b43a6bd11a349520d301" podNamespace="openshift-kube-scheduler" podName="openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.537875 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.538107 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.538138 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.538150 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.538263 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.538390 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.539724 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.539787 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.539807 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.540946 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.540995 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.544198 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.544528 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.544550 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.544609 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.544625 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.544639 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.570901 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.572365 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.572429 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.572443 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.572469 3552 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.573840 3552 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.200:6443: connect: connection refused" node="crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.576873 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.576921 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.576952 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.576976 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-usr-local-bin\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577002 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577027 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577055 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577130 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-static-pod-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577258 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-log-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577417 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-data-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577472 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577496 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 
15:25:01.577516 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577535 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-resource-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.577553 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-cert-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.679216 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-cert-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.679391 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-cert-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.679919 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-data-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680007 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680048 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680057 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680079 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-data-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680125 
3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680134 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680173 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680181 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-resource-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680260 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680276 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-resource-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680301 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680341 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-usr-local-bin\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680351 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a3f6a3e226d5c60ea73cb7fac85e9195-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"a3f6a3e226d5c60ea73cb7fac85e9195\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680376 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-log-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680374 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680433 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3ae206906481b4831fd849b559269c8-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d3ae206906481b4831fd849b559269c8\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680542 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680447 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/4faaac70bf21c7d77dcb526af466bffa-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"4faaac70bf21c7d77dcb526af466bffa\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680626 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680483 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-log-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680657 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680483 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-usr-local-bin\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680689 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: 
\"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-static-pod-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680776 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680951 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/6a57a7fb1944b43a6bd11a349520d301-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"6a57a7fb1944b43a6bd11a349520d301\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.680976 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/b2a6a3b2ca08062d24afa4c01aaf9e4f-static-pod-dir\") pod \"etcd-crc\" (UID: \"b2a6a3b2ca08062d24afa4c01aaf9e4f\") " pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.871602 3552 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.200:6443: connect: connection refused" interval="800ms" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.891884 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.917846 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.926529 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.941242 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3f6a3e226d5c60ea73cb7fac85e9195.slice/crio-28268f15c67fbee0c61da461cfe9375fd08e2871eaca4c46d41bfd33abcb8b6c WatchSource:0}: Error finding container 28268f15c67fbee0c61da461cfe9375fd08e2871eaca4c46d41bfd33abcb8b6c: Status 404 returned error can't find the container with id 28268f15c67fbee0c61da461cfe9375fd08e2871eaca4c46d41bfd33abcb8b6c Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.948501 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.956524 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.961943 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4faaac70bf21c7d77dcb526af466bffa.slice/crio-ea00cd4d18ac5b1c503ad42a164130825262a754d7404900168ed87a6b822bfa WatchSource:0}: Error finding container ea00cd4d18ac5b1c503ad42a164130825262a754d7404900168ed87a6b822bfa: Status 404 returned error can't find the container with id ea00cd4d18ac5b1c503ad42a164130825262a754d7404900168ed87a6b822bfa Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.974455 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:01 crc kubenswrapper[3552]: W0320 15:25:01.975177 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a57a7fb1944b43a6bd11a349520d301.slice/crio-f81af9fd147c5d8ba3204d7ca458ab0481a5c0672440382813cc9376990fb349 WatchSource:0}: Error finding container f81af9fd147c5d8ba3204d7ca458ab0481a5c0672440382813cc9376990fb349: Status 404 returned error can't find the container with id f81af9fd147c5d8ba3204d7ca458ab0481a5c0672440382813cc9376990fb349 Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.975963 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.976006 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.976020 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:01 crc kubenswrapper[3552]: I0320 15:25:01.976050 3552 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:25:01 crc kubenswrapper[3552]: E0320 15:25:01.977266 3552 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.200:6443: connect: connection refused" node="crc" Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.263437 3552 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc kubenswrapper[3552]: W0320 15:25:02.349116 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc kubenswrapper[3552]: E0320 15:25:02.349190 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc kubenswrapper[3552]: W0320 15:25:02.388668 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc 
kubenswrapper[3552]: E0320 15:25:02.388730 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.434484 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"f81af9fd147c5d8ba3204d7ca458ab0481a5c0672440382813cc9376990fb349"} Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.435199 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"ea00cd4d18ac5b1c503ad42a164130825262a754d7404900168ed87a6b822bfa"} Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.436116 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"28268f15c67fbee0c61da461cfe9375fd08e2871eaca4c46d41bfd33abcb8b6c"} Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.437326 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"c4b2df33164224c3c21d2b19d72b67178fd83cf3c685e4ebb0e99e9149abbc3f"} Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.438271 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d3ae206906481b4831fd849b559269c8","Type":"ContainerStarted","Data":"4f164518b5d856e2e370eb2cc24c4cdfaf8f9eca5929ba6ec31d543f3cd9a66c"} Mar 20 15:25:02 crc kubenswrapper[3552]: W0320 15:25:02.662349 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc kubenswrapper[3552]: E0320 15:25:02.662549 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:02 crc kubenswrapper[3552]: E0320 15:25:02.674518 3552 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.200:6443: connect: connection refused" interval="1.6s" Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.777422 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.778953 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.779010 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.779027 3552 
Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.779027 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:02 crc kubenswrapper[3552]: I0320 15:25:02.779055 3552 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:25:02 crc kubenswrapper[3552]: E0320 15:25:02.780033 3552 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.200:6443: connect: connection refused" node="crc"
Mar 20 15:25:02 crc kubenswrapper[3552]: W0320 15:25:02.952771 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:02 crc kubenswrapper[3552]: E0320 15:25:02.952865 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.264452 3552 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.444118 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"117e21001e0a3ad897c132717618b6577368b96ec9b51f5ea4b8588484a231c4"}
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.444175 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"4aee425f0ce579d0dfe2ec66cb741c24e45717fe40e2946cabafcd22b84b380c"}
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.447392 3552 generic.go:334] "Generic (PLEG): container finished" podID="a3f6a3e226d5c60ea73cb7fac85e9195" containerID="d25d3a48b3d6f22c85c20a3602fb2e8ade00d8547e1c4c70f67590644725b429" exitCode=0
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.447529 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerDied","Data":"d25d3a48b3d6f22c85c20a3602fb2e8ade00d8547e1c4c70f67590644725b429"}
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.447582 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.448990 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.449027 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.449041 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.449892 3552 generic.go:334] "Generic (PLEG): container finished" podID="b2a6a3b2ca08062d24afa4c01aaf9e4f" containerID="7a17e2218bd1867718ffcaf19c3b82b066495ff50d382d942ee4412a9d15b0eb" exitCode=0
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.449940 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerDied","Data":"7a17e2218bd1867718ffcaf19c3b82b066495ff50d382d942ee4412a9d15b0eb"}
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.450015 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.450764 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451276 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451305 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451317 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451675 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451712 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451728 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451843 3552 generic.go:334] "Generic (PLEG): container finished" podID="d3ae206906481b4831fd849b559269c8" containerID="1f419a886a89c86b5a5411a6c6d72ee563edbed19a78990933b16803669d9662" exitCode=0
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451888 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.451965 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d3ae206906481b4831fd849b559269c8","Type":"ContainerDied","Data":"1f419a886a89c86b5a5411a6c6d72ee563edbed19a78990933b16803669d9662"}
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.452676 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.452705 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.452717 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.454483 3552 generic.go:334] "Generic (PLEG): container finished" podID="6a57a7fb1944b43a6bd11a349520d301" containerID="0ca87dc9857ae7133245fe1d8aed7dfd01a5e8b87834144e18106cb914792d53" exitCode=0
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.454526 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerDied","Data":"0ca87dc9857ae7133245fe1d8aed7dfd01a5e8b87834144e18106cb914792d53"}
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.454600 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.456433 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.456494 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:03 crc kubenswrapper[3552]: I0320 15:25:03.456515 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.264195 3552 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp 38.102.83.200:6443: connect: connection refused
Mar 20 15:25:04 crc kubenswrapper[3552]: E0320 15:25:04.276274 3552 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.200:6443: connect: connection refused" interval="3.2s"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.380761 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.383009 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.383048 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.383063 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.383098 3552 kubelet_node_status.go:77] "Attempting to register node" node="crc"
Mar 20 15:25:04 crc kubenswrapper[3552]: E0320 15:25:04.384196 3552 kubelet_node_status.go:100] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.200:6443: connect: connection refused" node="crc"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.459246 3552 generic.go:334] "Generic (PLEG): container finished" podID="b2a6a3b2ca08062d24afa4c01aaf9e4f" containerID="e0daf34d1d927751011e8fb8e69bc808eb5a8948fd0a93d2e2890b057f57cd05" exitCode=0
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.459343 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerDied","Data":"e0daf34d1d927751011e8fb8e69bc808eb5a8948fd0a93d2e2890b057f57cd05"}
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.459434 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.460692 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.460738 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.460754 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.461151 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d3ae206906481b4831fd849b559269c8","Type":"ContainerStarted","Data":"24b139cd037a86af7ecea0fb8c66f0be79ccc8c8cefd560303e1dacb2c54dbbc"}
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.461186 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.461885 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.461937 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.461948 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.463633 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"03805260f31b5c88b57bb94bf816a698e58e12f3d139dd6b59c95bd55e34479c"}
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.463665 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"e98943f48f2887a99d0993da317df7ec972516393e9293b01fceb19059d958d5"}
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.463675 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"6a57a7fb1944b43a6bd11a349520d301","Type":"ContainerStarted","Data":"abbdc8dda3053204d33fba33de5ab914928cc5fac1c51a85b8f45f9091120785"}
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.463711 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.465023 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.465048 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.465058 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.466696 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"2eb8c2f0e6e4322471228788ade221fad12a07875f2bd05314b1471fa1cad13c"}
Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.466715 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"4faaac70bf21c7d77dcb526af466bffa","Type":"ContainerStarted","Data":"563632d47551c7334f7bb82b3379200515f11eca3325f6f306cfec9eac82df52"} Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.467447 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.467477 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.467489 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.471879 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"9ec58ea5161bf8f81d77402f06d030d09c7a3d50df82a617d172e3313acf0247"} Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.471917 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"c3aea37270bcbaf31f846bb21a1f07d63a1eb3ebff0fdc03357086e5c712cb27"} Mar 20 15:25:04 crc kubenswrapper[3552]: I0320 15:25:04.471928 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"a8a7868bb25b54f06d36010d655543b0c5d6c55f38c8c228e5381c102c407652"} Mar 20 15:25:04 crc kubenswrapper[3552]: W0320 15:25:04.565107 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: E0320 15:25:04.565182 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: W0320 15:25:04.598652 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: E0320 15:25:04.598790 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: W0320 15:25:04.610774 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: E0320 15:25:04.610895 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.RuntimeClass: failed to list 
*v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: W0320 15:25:04.750569 3552 reflector.go:539] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:04 crc kubenswrapper[3552]: E0320 15:25:04.751023 3552 reflector.go:147] k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229: Failed to watch *v1.Node: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.263426 3552 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": dial tcp 38.102.83.200:6443: connect: connection refused Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.478942 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"15357b2d9c1d605218b8ddffccc235a7d2313ef728e4642a392f00e709e43cfa"} Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.478985 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"730c2de2a565da1d4958fcf5fd9a4bfc900dc2a4b272532d168530a66c8f2282"} Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.479087 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.480387 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.480437 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.480450 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.483490 3552 generic.go:334] "Generic (PLEG): container finished" podID="b2a6a3b2ca08062d24afa4c01aaf9e4f" containerID="fbb9b6bf726cbd2bd02f5d71e464cc4941f8d3a9d88267d641d698a3748e0d2e" exitCode=0 Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.483569 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.483593 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.484066 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.484442 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerDied","Data":"fbb9b6bf726cbd2bd02f5d71e464cc4941f8d3a9d88267d641d698a3748e0d2e"} Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 
15:25:05.484529 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.484888 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.485792 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.485904 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.485919 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.485873 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.486064 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.486084 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.486108 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.486120 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.486130 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.490999 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.491040 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.491053 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:05 crc kubenswrapper[3552]: I0320 15:25:05.952862 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.490565 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"7945accc53f7b729fcdbd8e5697bb564e7201a84b2b4c58e882a8f1c7b7ab762"} Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.490625 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.490637 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"1cbd9b297dc042832d0081accf983df52d65b126cb08cc1cfbe1c7e4d0df0826"} Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.490750 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.490814 3552 kubelet.go:2533] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.494353 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.494412 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.494429 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.494779 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.494831 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:06 crc kubenswrapper[3552]: I0320 15:25:06.494866 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.498953 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.499032 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"8c838f4de8044fe81527a3856f3cff8a602834ec45ce4421c5e68b0ea86fed49"} Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.499166 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"b2a6a3b2ca08062d24afa4c01aaf9e4f","Type":"ContainerStarted","Data":"ca6b2f22ae94f4accd854d8560fbd3a49211293d8b84810fd8dad88ab763eced"} Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.498963 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.500189 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.500224 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.500241 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.500855 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.500910 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.500932 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.585272 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.586981 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.587032 3552 kubelet_node_status.go:729] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.587051 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:07 crc kubenswrapper[3552]: I0320 15:25:07.587085 3552 kubelet_node_status.go:77] "Attempting to register node" node="crc" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.502226 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.503805 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.503890 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.503915 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.563716 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.563782 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.563915 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.565291 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.565343 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.565362 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.574283 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:08 crc kubenswrapper[3552]: I0320 15:25:08.714752 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.355661 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.355916 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.357921 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.357990 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.358010 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.378005 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.504477 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.504582 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.505573 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.505617 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.505643 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.505661 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.505694 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.505712 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.755385 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.755587 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.756842 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.756901 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:09 crc kubenswrapper[3552]: I0320 15:25:09.756920 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.381941 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.506767 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.506773 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.508387 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.508485 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.508511 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.508387 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.508587 3552 
kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:10 crc kubenswrapper[3552]: I0320 15:25:10.508605 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:11 crc kubenswrapper[3552]: E0320 15:25:11.531734 3552 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Mar 20 15:25:11 crc kubenswrapper[3552]: I0320 15:25:11.564825 3552 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:25:11 crc kubenswrapper[3552]: I0320 15:25:11.565034 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.208559 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.208830 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.216197 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.216619 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.216697 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.217066 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.515139 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.515793 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.515827 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:13 crc kubenswrapper[3552]: I0320 15:25:13.515839 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:15 crc kubenswrapper[3552]: I0320 15:25:15.509217 3552 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:36240->192.168.126.11:17697: read: connection reset by peer" start-of-body= Mar 20 15:25:15 crc kubenswrapper[3552]: I0320 15:25:15.509333 3552 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:36240->192.168.126.11:17697: read: connection reset by peer" Mar 20 15:25:16 crc kubenswrapper[3552]: E0320 15:25:16.033126 3552 event.go:355] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.189e961098fd82ee default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-03-20 15:25:01.260948206 +0000 UTC m=+0.954645076,LastTimestamp:2026-03-20 15:25:01.260948206 +0000 UTC m=+0.954645076,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.264788 3552 csi_plugin.go:880] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc": net/http: TLS handshake timeout Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.525012 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_a3f6a3e226d5c60ea73cb7fac85e9195/kube-apiserver-check-endpoints/3.log" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.527517 3552 generic.go:334] "Generic (PLEG): container finished" podID="a3f6a3e226d5c60ea73cb7fac85e9195" containerID="15357b2d9c1d605218b8ddffccc235a7d2313ef728e4642a392f00e709e43cfa" exitCode=255 Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.527569 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerDied","Data":"15357b2d9c1d605218b8ddffccc235a7d2313ef728e4642a392f00e709e43cfa"} Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.528672 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.530794 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.530843 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.530859 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.532639 3552 scope.go:117] "RemoveContainer" containerID="15357b2d9c1d605218b8ddffccc235a7d2313ef728e4642a392f00e709e43cfa" Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.796730 3552 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403} Mar 20 15:25:16 
Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.796806 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.801280 3552 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
Mar 20 15:25:16 crc kubenswrapper[3552]: I0320 15:25:16.801341 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a3f6a3e226d5c60ea73cb7fac85e9195" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Mar 20 15:25:17 crc kubenswrapper[3552]: I0320 15:25:17.532765 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_a3f6a3e226d5c60ea73cb7fac85e9195/kube-apiserver-check-endpoints/3.log"
Mar 20 15:25:17 crc kubenswrapper[3552]: I0320 15:25:17.535128 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"a3f6a3e226d5c60ea73cb7fac85e9195","Type":"ContainerStarted","Data":"74093ab8a547539ec96bc2fb7b5f3ec647564a4617fc4ce718c43c5fef82bc9d"}
Mar 20 15:25:17 crc kubenswrapper[3552]: I0320 15:25:17.535291 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:17 crc kubenswrapper[3552]: I0320 15:25:17.536383 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:17 crc kubenswrapper[3552]: I0320 15:25:17.536458 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:17 crc kubenswrapper[3552]: I0320 15:25:17.536481 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:18 crc kubenswrapper[3552]: I0320 15:25:18.779186 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Mar 20 15:25:18 crc kubenswrapper[3552]: I0320 15:25:18.779406 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:18 crc kubenswrapper[3552]: I0320 15:25:18.780993 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:18 crc kubenswrapper[3552]: I0320 15:25:18.781069 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:18 crc kubenswrapper[3552]: I0320 15:25:18.781087 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:18 crc kubenswrapper[3552]: I0320 15:25:18.798163 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.539786 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.540575 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.540645 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.540674 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.761241 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.761391 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.761466 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.762463 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.762496 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.762511 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:19 crc kubenswrapper[3552]: I0320 15:25:19.767575 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Mar 20 15:25:20 crc kubenswrapper[3552]: I0320 15:25:20.541990 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:20 crc kubenswrapper[3552]: I0320 15:25:20.542985 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:20 crc kubenswrapper[3552]: I0320 15:25:20.543009 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:20 crc kubenswrapper[3552]: I0320 15:25:20.543017 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:21 crc kubenswrapper[3552]: E0320 15:25:21.531930 3552 eviction_manager.go:282] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.544350 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.545750 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.545961 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.545991 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.565024 3552 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.565099 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="4faaac70bf21c7d77dcb526af466bffa" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:25:21 crc kubenswrapper[3552]: E0320 15:25:21.799948 3552 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.806934 3552 trace.go:236] Trace[1426077610]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 (20-Mar-2026 15:25:09.942) (total time: 11863ms):
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[1426077610]: ---"Objects listed" error: 11863ms (15:25:21.805)
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[1426077610]: [11.863185451s] [11.863185451s] END
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.806987 3552 reflector.go:351] Caches populated for *v1.CSIDriver from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.806983 3552 trace.go:236] Trace[1168383980]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 (20-Mar-2026 15:25:10.466) (total time: 11339ms):
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[1168383980]: ---"Objects listed" error: 11339ms (15:25:21.805)
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[1168383980]: [11.339158876s] [11.339158876s] END
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.807043 3552 reflector.go:351] Caches populated for *v1.RuntimeClass from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.807763 3552 reconstruct_new.go:210] "DevicePaths of reconstructed volumes updated"
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.808084 3552 trace.go:236] Trace[985778105]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 (20-Mar-2026 15:25:08.937) (total time: 12870ms):
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[985778105]: ---"Objects listed" error: 12870ms (15:25:21.807)
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[985778105]: [12.870805138s] [12.870805138s] END
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.808136 3552 reflector.go:351] Caches populated for *v1.Node from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.812056 3552 trace.go:236] Trace[235285290]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229 (20-Mar-2026 15:25:10.561) (total time: 11250ms):
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[235285290]: ---"Objects listed" error: 11250ms (15:25:21.811)
Mar 20 15:25:21 crc kubenswrapper[3552]: Trace[235285290]: [11.250272645s] [11.250272645s] END
Mar 20 15:25:21 crc kubenswrapper[3552]: I0320 15:25:21.812133 3552 reflector.go:351] Caches populated for *v1.Service from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 20 15:25:21 crc kubenswrapper[3552]: E0320 15:25:21.813843 3552 kubelet_node_status.go:100] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.257442 3552 apiserver.go:52] "Watching apiserver"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.284804 3552 reflector.go:351] Caches populated for *v1.Pod from k8s.io/client-go@v0.29.0/tools/cache/reflector.go:229
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287235 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m","openshift-console/downloads-65476884b9-9wcvx","openshift-kube-controller-manager/installer-9-crc","openshift-kube-controller-manager/revision-pruner-9-crc","openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm","openshift-marketplace/redhat-marketplace-8s8pc","openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7","openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd","openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg","openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh","openshift-kube-controller-manager/installer-12-crc","openshift-kube-controller-manager/revision-pruner-12-crc","openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh","openshift-marketplace/redhat-operators-f4jkp","openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb","openshift-multus/multus-admission-controller-6c7c885997-4hbbc","openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b","openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46","openshift-console-operator/console-operator-5dbbc74dc9-cp5cd","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-operator/network-operator-767c585db5-zd56b","openshift-kube-scheduler/installer-7-crc","openshift-multus/network-metrics-daemon-qdfr4","openshift-network-node-identity/network-node-identity-7xghp","openshift-dns/node-resolver-dn27q","openshift-image-registry/node-ca-l92hr","openshift-kube-apiserver/installer-11-crc","openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7","openshift-marketplace/marketplace-operator-8b455464d-f9xdt","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-controller-manager/revision-pruner-11-crc","openshift-kube-controller-manager/revision-pruner-8-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz","openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv","openshift-kube-scheduler/revision-pruner-8-crc","openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx","openshift-service-ca/service-ca-cd974775-4nsv5","openshift-console/console-8568c59db8-fspjn","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-marketplace/certified-operators-7287f","openshift-marketplace/community-operators-8jhz6","openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf","openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh","openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv","openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz","openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9","openshift-ingress/router-default-5c9bf7bc58-6jctv"
,"openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr","openshift-network-operator/iptables-alerter-wwpnd","openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht","openshift-multus/multus-additional-cni-plugins-bzj2p","hostpath-provisioner/csi-hostpathplugin-hvm8g","openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8","openshift-dns-operator/dns-operator-75f687757b-nz2xb","openshift-kube-controller-manager/revision-pruner-10-crc","openshift-kube-scheduler/installer-8-crc","openshift-kube-scheduler/revision-pruner-7-crc","openshift-apiserver/apiserver-6cdf967d79-ffdf8","openshift-console-operator/console-conversion-webhook-595f9969b-l6z49","openshift-etcd/etcd-crc","openshift-image-registry/image-registry-6fbd648f87-j4bk5","openshift-network-diagnostics/network-check-target-v54bt","openshift-ovn-kubernetes/ovnkube-node-44qcg","openshift-authentication/oauth-openshift-6499cf79cf-qdfbh","openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc","openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z","openshift-dns/dns-default-gbw49","openshift-ingress-canary/ingress-canary-2vhcn","openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw","openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb","openshift-multus/multus-q88th","openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl","openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp","openshift-etcd-operator/etcd-operator-768d5b5d86-722mg","openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t","openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7","openshift-machine-config-operator/machine-config-daemon-zpnhg","openshift-machine-config-operator/machine-config-server-v65wr","openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287336 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" podNamespace="openshift-etcd-operator" podName="etcd-operator-768d5b5d86-722mg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287541 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" podNamespace="openshift-operator-lifecycle-manager" podName="olm-operator-6d8474f75f-x54mh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287715 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287737 3552 topology_manager.go:215] "Topology Admit Handler" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" podNamespace="openshift-operator-lifecycle-manager" podName="package-server-manager-84d578d794-jw7r2" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.287812 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287717 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287850 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" podNamespace="openshift-service-ca-operator" podName="service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.287876 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.287953 3552 topology_manager.go:215] "Topology Admit Handler" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" podNamespace="openshift-machine-config-operator" podName="machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288061 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" podNamespace="openshift-operator-lifecycle-manager" podName="catalog-operator-857456c46-7f5wf" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288164 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" podNamespace="openshift-marketplace" podName="marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288274 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cc291782-27d2-4a74-af79-c7dcb31535d2" podNamespace="openshift-network-operator" podName="network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288394 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" podNamespace="openshift-machine-api" podName="machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288562 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288578 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.289093 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288654 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.289206 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288687 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288714 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.288760 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.289295 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.289469 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" podNamespace="openshift-kube-apiserver-operator" podName="kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.289497 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.289624 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.289736 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.289755 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" podNamespace="openshift-controller-manager-operator" podName="openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.289829 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.289761 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.290106 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290114 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" podNamespace="openshift-kube-controller-manager-operator" podName="kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290163 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.290299 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290356 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" podNamespace="openshift-kube-storage-version-migrator-operator" podName="kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290514 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.290615 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290686 3552 topology_manager.go:215] "Topology Admit Handler" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" podNamespace="openshift-config-operator" podName="openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.290957 3552 topology_manager.go:215] "Topology Admit Handler" podUID="10603adc-d495-423c-9459-4caa405960bb" podNamespace="openshift-dns-operator" podName="dns-operator-75f687757b-nz2xb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.291021 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.291256 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.291353 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.291463 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.291510 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.291563 3552 topology_manager.go:215] "Topology Admit Handler" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" podNamespace="openshift-apiserver-operator" podName="openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.291721 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.291801 3552 topology_manager.go:215] "Topology Admit Handler" podUID="71af81a9-7d43-49b2-9287-c375900aa905" podNamespace="openshift-kube-scheduler-operator" podName="openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.291973 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.292048 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" podNamespace="openshift-authentication-operator" podName="authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.292112 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.292335 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.292342 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.292474 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.292528 3552 topology_manager.go:215] "Topology Admit Handler" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" podNamespace="openshift-machine-api" podName="control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.292557 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.292909 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" podNamespace="openshift-image-registry" podName="cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293121 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293208 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" podNamespace="openshift-multus" podName="multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.293270 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293524 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293609 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293608 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293659 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.293660 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.293545 3552 topology_manager.go:215] "Topology Admit Handler" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" podNamespace="openshift-multus" podName="multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294085 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" podNamespace="openshift-multus" podName="network-metrics-daemon-qdfr4" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294226 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294356 3552 topology_manager.go:215] "Topology Admit Handler" podUID="410cf605-1970-4691-9c95-53fdc123b1f3" podNamespace="openshift-ovn-kubernetes" podName="ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294483 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.294611 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294658 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" podNamespace="openshift-network-diagnostics" podName="network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294735 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.294898 3552 topology_manager.go:215] "Topology Admit Handler" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" podNamespace="openshift-network-diagnostics" podName="network-check-target-v54bt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.295213 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.295245 3552 topology_manager.go:215] "Topology Admit Handler" podUID="51a02bbf-2d40-4f84-868a-d399ea18a846" podNamespace="openshift-network-node-identity" podName="network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.295218 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.295327 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.295473 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.295594 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" podNamespace="openshift-ovn-kubernetes" podName="ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.295699 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.301167 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2b6d14a5-ca00-40c7-af7a-051a98a24eed" podNamespace="openshift-network-operator" podName="iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.302113 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.303643 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.305828 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.307983 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.308072 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.308398 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.308514 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" podNamespace="openshift-kube-storage-version-migrator" podName="migrator-f7c6d88df-q2fnv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.308718 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309594 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309647 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309876 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.308742 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.310079 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.310131 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.308879 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309154 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.310290 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309175 3552 topology_manager.go:215] "Topology Admit Handler" podUID="13045510-8717-4a71-ade4-be95a76440a7" podNamespace="openshift-dns" podName="dns-default-gbw49" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309210 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.309349 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.310716 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.310992 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.311054 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.311226 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6a23c0ee-5648-448c-b772-83dced2891ce" podNamespace="openshift-dns" podName="node-resolver-dn27q" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.311362 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.311625 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.311888 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9fb762d1-812f-43f1-9eac-68034c1ecec7" podNamespace="openshift-cluster-version" podName="cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.312437 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-dn27q" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.312414 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" podNamespace="openshift-oauth-apiserver" podName="apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.312708 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.314098 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.314142 3552 topology_manager.go:215] "Topology Admit Handler" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" podNamespace="openshift-operator-lifecycle-manager" podName="packageserver-8464bcc55b-sjnqz" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.314536 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" podNamespace="openshift-ingress-operator" podName="ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.314770 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.314890 3552 topology_manager.go:215] "Topology Admit Handler" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" podNamespace="openshift-cluster-samples-operator" podName="cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.314903 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.315040 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.315104 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.315169 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.315186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.315446 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ec1bae8b-3200-4ad9-b33b-cf8701f3027c" podNamespace="openshift-cluster-machine-approver" podName="machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.315729 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.315792 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.316061 3552 topology_manager.go:215] "Topology Admit Handler" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" podNamespace="openshift-ingress" podName="router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.316389 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" podNamespace="openshift-machine-config-operator" podName="machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.316471 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.316655 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.316737 3552 topology_manager.go:215] "Topology Admit Handler" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" podNamespace="openshift-console-operator" podName="console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.316968 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.317027 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" podNamespace="openshift-console-operator" podName="console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.317468 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.317780 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.317801 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.317857 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.318293 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.318389 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.318563 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.318937 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.318967 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.319037 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.319340 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.319479 3552 topology_manager.go:215] "Topology Admit Handler" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" podNamespace="openshift-machine-config-operator" podName="machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.320083 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6268b7fe-8910-4505-b404-6f1df638105c" podNamespace="openshift-console" podName="downloads-65476884b9-9wcvx" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.320258 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.320496 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.320572 3552 topology_manager.go:215] "Topology Admit Handler" podUID="bf1a8b70-3856-486f-9912-a2de1d57c3fb" podNamespace="openshift-machine-config-operator" podName="machine-config-server-v65wr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.321014 3552 topology_manager.go:215] "Topology Admit Handler" podUID="f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e" podNamespace="openshift-image-registry" podName="node-ca-l92hr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.321210 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.321299 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.321554 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" podNamespace="openshift-ingress-canary" podName="ingress-canary-2vhcn" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.321853 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.321950 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322053 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322266 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322289 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322319 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322353 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.321562 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-l92hr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322536 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322625 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322634 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" podNamespace="openshift-multus" podName="multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322655 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322745 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322861 3552 topology_manager.go:215] "Topology Admit Handler" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" podNamespace="hostpath-provisioner" podName="csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.322941 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.323055 3552 topology_manager.go:215] "Topology Admit Handler" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" podNamespace="openshift-marketplace" podName="certified-operators-7287f" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.323096 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.323493 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.323652 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.323996 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.324140 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.324635 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.325057 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.326211 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.326276 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.326449 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.326219 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.326656 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.326712 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.329438 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" podNamespace="openshift-marketplace" podName="community-operators-8jhz6"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.329527 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.329606 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.329776 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" podNamespace="openshift-marketplace" podName="redhat-operators-f4jkp"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330028 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" podNamespace="openshift-marketplace" podName="redhat-marketplace-8s8pc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330232 3552 topology_manager.go:215] "Topology Admit Handler" podUID="72854c1e-5ae2-4ed6-9e50-ff3bccde2635" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-8-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330337 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330395 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.330579 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330605 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6639609b-906b-4193-883e-ed1160aa5d50" podNamespace="openshift-service-ca" podName="service-ca-cd974775-4nsv5"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330637 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-8-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.330994 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.331164 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.331268 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.331274 3552 topology_manager.go:215] "Topology Admit Handler" podUID="dc41379b-41a8-497f-8ac6-4ee19454d1d2" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-9-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.331370 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.331503 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.331812 3552 topology_manager.go:215] "Topology Admit Handler" podUID="972b3cfd-f9d0-485e-924b-b5258282d155" podNamespace="openshift-kube-controller-manager" podName="installer-9-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.332175 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.332318 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-9-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.332612 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" podNamespace="openshift-authentication" podName="oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.333108 3552 topology_manager.go:215] "Topology Admit Handler" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" podNamespace="openshift-console" podName="console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.333192 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.333273 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.333705 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" podNamespace="openshift-image-registry" podName="image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.333766 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.333841 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.334223 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ba116478-01f2-47d9-8b88-9db94f1478e3" podNamespace="openshift-kube-scheduler" podName="installer-7-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.334296 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.334369 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.334781 3552 topology_manager.go:215] "Topology Admit Handler" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" podNamespace="openshift-apiserver" podName="apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.334836 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-7-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.335335 3552 topology_manager.go:215] "Topology Admit Handler" podUID="410dbf23-e4f3-4307-910c-ad0a079c33e2" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-10-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.335392 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.335480 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.335994 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" podNamespace="openshift-controller-manager" podName="controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.336057 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-10-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.336577 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2bbbb77a-fabb-4250-a075-38a7c2a82752" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-11-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.336662 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.336770 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.337118 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e82cfcea-bcd5-4d25-9d17-4978f4452a3a" podNamespace="openshift-kube-controller-manager" podName="revision-pruner-12-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.337187 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-11-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.337723 3552 topology_manager.go:215] "Topology Admit Handler" podUID="334fb4b6-16b9-453a-9208-846feab2a2fa" podNamespace="openshift-kube-controller-manager" podName="installer-12-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.337780 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-12-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.338100 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/installer-12-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.338431 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555385-rxkwl"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.338797 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b541f220-6272-4285-8250-4474714fb6cd" podNamespace="openshift-kube-apiserver" podName="installer-11-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.338888 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.339148 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" podNamespace="openshift-route-controller-manager" podName="route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.339224 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-11-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.339540 3552 topology_manager.go:215] "Topology Admit Handler" podUID="67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f" podNamespace="openshift-kube-scheduler" podName="revision-pruner-7-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.339634 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.339721 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.339908 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b0f74f48-a515-4cf0-9196-37bfb966b31f" podNamespace="openshift-kube-scheduler" podName="revision-pruner-8-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.339986 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-7-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.340331 3552 topology_manager.go:215] "Topology Admit Handler" podUID="252d78ec-e97f-4fdb-9104-4464f1cb6172" podNamespace="openshift-kube-scheduler" podName="installer-8-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.340451 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-8-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.340875 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/installer-8-crc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.340977 3552 topology_manager.go:215] "Topology Admit Handler" podUID="079b7b69-b036-48d0-ab94-f3d1e03777f9" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555400-kb5zp"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.341277 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d1f18111-2e43-40c4-ae3c-0f02d431999d" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29555415-s64ht"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.341469 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.343511 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.355569 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01904e9caa36dbc7772b537a148f3270c1b6a855aab806556aac5544f9540dc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:37:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:36:12Z\\\"}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.367119 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c085412c-b875-46c9-ae3e-e6b0d8067091\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [olm-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"olm-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"olm-operator-6d8474f75f-x54mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.377470 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b6d14a5-ca00-40c7-af7a-051a98a24eed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d6994ed1b1593f7638e3a8732c503356885a02dc245451ceddc3809f61023dce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-wwpnd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.381799 3552 desired_state_of_world_populator.go:159] "Finished populating initial desired state of world"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.391459 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa90b3c2-febd-4588-a063-7fbbe82f00c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [router]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4c593fc9fefc335235a7118c3b526f9f265397b62293169959e09a693033db15\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6227ee4a8496f1cb024665458d33453ee5216601cd460a108dc527e674a4a58b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:39:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:36:17Z\\\"}},\\\"name\\\":\\\"router\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-ingress\"/\"router-default-5c9bf7bc58-6jctv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.401513 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf1a8b70-3856-486f-9912-a2de1d57c3fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6ee79b28e5b577df5d2e78c5d20b367b69a4eb87a6cd831a6c711e24daab251\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"machine-config-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-server-v65wr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-stats-auth\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411201 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411250 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411286 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411343 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411369 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-ovnkube-config\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.411535 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411724 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [route-controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1fd628f40d321354832b0f409d2bf9b89910de27bc6263a4fb5a55c25e160a99\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"route-controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-route-controller-manager\"/\"route-controller-manager-6f75dd68cc-gcdzx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.411715 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.911607924 +0000 UTC m=+22.605304784 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.411848 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-auth-proxy-config\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412105 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412160 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412208 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412245 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412283 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412324 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412466 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4qr9t\" (UniqueName: \"kubernetes.io/projected/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-kube-api-access-4qr9t\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412511 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/297ab9b6-2186-4d5b-a952-2bfd59af63c4-mcc-auth-proxy-config\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412553 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.412565 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412592 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.412619 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.912604821 +0000 UTC m=+22.606301661 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412649 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412681 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412713 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zjg2w\" (UniqueName: \"kubernetes.io/projected/51a02bbf-2d40-4f84-868a-d399ea18a846-kube-api-access-zjg2w\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412740 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-default-certificate\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.412766 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413141 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413199 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413243 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xkzjk\" (UniqueName: \"kubernetes.io/projected/9d0dcce3-d96e-48cb-9b9f-362105911589-kube-api-access-xkzjk\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413283 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.413362 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.413437 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.913395912 +0000 UTC m=+22.607092752 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413515 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bwbqm\" (UniqueName: \"kubernetes.io/projected/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-kube-api-access-bwbqm\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413587 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413632 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413677 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413728 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413816 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413878 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-serviceca\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.413949 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414035 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-k8s-cni-cncf-io\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414092 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414183 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-env-overrides\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414203 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414231 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9x6dp\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-kube-api-access-9x6dp\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414239 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414304 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.914281546 +0000 UTC m=+22.607978386 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414281 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414184 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414368 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.914336997 +0000 UTC m=+22.608033867 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414454 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414457 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414537 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.914517282 +0000 UTC m=+22.608214192 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414584 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414636 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414660 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414730 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/cc291782-27d2-4a74-af79-c7dcb31535d2-host-etc-kube\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414776 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.914756059 +0000 UTC m=+22.608452979 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414803 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414813 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414672 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414464 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414179 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414867 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.914833471 +0000 UTC m=+22.608530311 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414307 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.414464 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.414989 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-auth-proxy-config\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.415141 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.915130499 +0000 UTC m=+22.608827349 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.415153 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.415255 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.415296 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.415593 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.915154569 +0000 UTC m=+22.608851409 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.415616 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.915606841 +0000 UTC m=+22.609303681 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.415641 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.415699 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.416015 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-env-overrides\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.416090 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.916052873 +0000 UTC m=+22.609749743 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.416199 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.416290 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.416886 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.416942 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-ovnkube-config\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.417127 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.420126 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/297ab9b6-2186-4d5b-a952-2bfd59af63c4-mcc-auth-proxy-config\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.421312 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.426938 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-serviceca\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.427217 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.916118245 +0000 UTC m=+22.609815175 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.427247 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-stats-auth\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.427262 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.927247653 +0000 UTC m=+22.620944493 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.427281 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.927272014 +0000 UTC m=+22.620968854 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.427308 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.927293714 +0000 UTC m=+22.620990554 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.427324 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.927316165 +0000 UTC m=+22.621013005 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.427365 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.427373 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.927346776 +0000 UTC m=+22.621043626 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.427436 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.427466 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-certs\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.427496 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-dir\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.428271 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.428327 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc
kubenswrapper[3552]: I0320 15:25:22.428356 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-hostroot\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.428383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-multus-daemon-config\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.429756 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.429831 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.929795841 +0000 UTC m=+22.623492681 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.429884 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.429936 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.929927515 +0000 UTC m=+22.623624355 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.432639 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.432738 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.932708129 +0000 UTC m=+22.626405009 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.435092 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-certs\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436491 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436565 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436596 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436629 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit-dir\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436660 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9d0dcce3-d96e-48cb-9b9f-362105911589-mcd-auth-proxy-config\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436685 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-multus-certs\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436713 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436740 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436767 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cnibin\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436806 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-binary-copy\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436835 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa90b3c2-febd-4588-a063-7fbbe82f00c1-service-ca-bundle\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436864 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436891 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436916 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436940 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-netns\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.436968 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/2b6d14a5-ca00-40c7-af7a-051a98a24eed-iptables-alerter-script\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437010 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437039 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437071 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437096 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-node-pullsecrets\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437120 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437147 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437173 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437199 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod 
\"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437224 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-cnibin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437249 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-conf-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437304 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437338 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437364 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437391 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437437 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-bin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437465 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437465 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437492 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437525 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437536 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.937522568 +0000 UTC m=+22.631219398 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437575 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/530553aa-0a1d-423e-8a22-f5eb4bdbb883-available-featuregates\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437628 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437685 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.937665842 +0000 UTC m=+22.631362682 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437686 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437630 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437728 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437769 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.937720924 +0000 UTC m=+22.631417764 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437775 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437801 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.437806 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.937796496 +0000 UTC m=+22.631493336 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437880 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rkkfv\" (UniqueName: \"kubernetes.io/projected/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-kube-api-access-rkkfv\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437946 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.437979 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438045 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51a02bbf-2d40-4f84-868a-d399ea18a846-webhook-cert\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.438050 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438079 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/530553aa-0a1d-423e-8a22-f5eb4bdbb883-available-featuregates\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.438084 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.938075113 +0000 UTC m=+22.631771953 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438111 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438148 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438218 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-registration-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438291 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438323 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438390 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.438463 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.438636 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object 
"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.438705 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.93869279 +0000 UTC m=+22.632389630 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.438798 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.438862 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.938822603 +0000 UTC m=+22.632519443 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439227 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439270 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.939259015 +0000 UTC m=+22.632955855 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439536 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439575 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.939564823 +0000 UTC m=+22.633261663 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439736 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439769 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.939759318 +0000 UTC m=+22.633456158 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439836 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439864 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.939856201 +0000 UTC m=+22.633553051 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.439903 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.440043 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-default-certificate\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440168 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440203 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.94019306 +0000 UTC m=+22.633889910 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440371 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.940360624 +0000 UTC m=+22.634057464 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.440463 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440733 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.440756 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440777 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.940763845 +0000 UTC m=+22.634460695 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.440807 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4sfhc\" (UniqueName: \"kubernetes.io/projected/cc291782-27d2-4a74-af79-c7dcb31535d2-kube-api-access-4sfhc\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440837 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.440874 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.940863828 +0000 UTC m=+22.634560668 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.440903 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb762d1-812f-43f1-9eac-68034c1ecec7-kube-api-access\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.440943 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-csi-data-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.441080 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.441254 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.441441 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9d0dcce3-d96e-48cb-9b9f-362105911589-rootfs\") pod \"machine-config-daemon-zpnhg\" (UID: 
\"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.441482 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442246 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-machine-approver-tls\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442294 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442327 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.441504 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/2b6d14a5-ca00-40c7-af7a-051a98a24eed-iptables-alerter-script\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441752 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442679 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cni-binary-copy\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442756 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9d0dcce3-d96e-48cb-9b9f-362105911589-proxy-tls\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442823 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-c2f8t\" (UniqueName: \"kubernetes.io/projected/475321a1-8b7e-4033-8f72-b05a8b377347-kube-api-access-c2f8t\") 
pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442855 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2b6d14a5-ca00-40c7-af7a-051a98a24eed-host-slash\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442918 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442949 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442953 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443012 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-node-bootstrap-token\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.443014 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443043 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.443056 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.943041456 +0000 UTC m=+22.636738296 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443112 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443206 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443240 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443305 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443337 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443439 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443469 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443529 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-plugins-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gsxd9\" (UniqueName: \"kubernetes.io/projected/6a23c0ee-5648-448c-b772-83dced2891ce-kube-api-access-gsxd9\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443631 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443660 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb762d1-812f-43f1-9eac-68034c1ecec7-service-ca\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443726 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443791 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vtgqn\" (UniqueName: \"kubernetes.io/projected/297ab9b6-2186-4d5b-a952-2bfd59af63c4-kube-api-access-vtgqn\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443825 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443887 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443916 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.443984 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444058 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444088 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-host\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444154 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444217 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444249 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444313 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444341 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6a23c0ee-5648-448c-b772-83dced2891ce-hosts-file\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.444454 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.944441084 +0000 UTC m=+22.638137934 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441867 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.444618 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.944486315 +0000 UTC m=+22.638183165 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444648 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd556935-a077-45df-ba3f-d42c39326ccd-tmpfs\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.444703 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.444734 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.944724551 +0000 UTC m=+22.638421401 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444802 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-ssl-certs\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.444830 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-os-release\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441910 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441947 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441941 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441978 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.441979 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.442055 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9d0dcce3-d96e-48cb-9b9f-362105911589-mcd-auth-proxy-config\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.442101 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.442188 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.445160 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.446196 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.94618359 +0000 UTC m=+22.639880430 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.446548 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.446573 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.446638 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.446679 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.446720 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.446759 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.446799 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.446841 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.446849 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.946834258 +0000 UTC m=+22.640531098 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.446929 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.94691947 +0000 UTC m=+22.640616310 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.446969 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447017 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947001272 +0000 UTC m=+22.640698122 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447055 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947042643 +0000 UTC m=+22.640739593 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447083 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447096 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947083525 +0000 UTC m=+22.640780485 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447131 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947123846 +0000 UTC m=+22.640820696 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.445377 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447144 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447167 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947159537 +0000 UTC m=+22.640856377 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447181 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947174437 +0000 UTC m=+22.640871277 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.445605 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.445761 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447823 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.445940 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447244 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.447258 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447275 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447311 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.94729801 +0000 UTC m=+22.640994860 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447880 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947861445 +0000 UTC m=+22.641558275 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447893 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947888156 +0000 UTC m=+22.641584986 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447904 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947898546 +0000 UTC m=+22.641595376 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447361 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447949 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947944318 +0000 UTC m=+22.641641148 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.447969 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.947964648 +0000 UTC m=+22.641661478 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.447993 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448015 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448033 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448053 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448093 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448113 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448138 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448157 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-multus\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448176 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448195 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448214 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448234 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448256 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448277 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.448384 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.948374369 +0000 UTC m=+22.642071199 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448522 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448607 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448885 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448915 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-socket-dir-parent\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448938 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.448960 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449002 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449032 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449069 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949056687 +0000 UTC m=+22.642753517 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449083 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949077908 +0000 UTC m=+22.642774738 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449105 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449132 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449181 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa90b3c2-febd-4588-a063-7fbbe82f00c1-service-ca-bundle\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449203 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449228 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949222092 +0000 UTC m=+22.642918922 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449228 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449256 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949251233 +0000 UTC m=+22.642948063 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449234 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449274 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449299 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449317 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449341 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449373 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449378 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449278 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949270633 +0000 UTC m=+22.642967463 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449420 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449384 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449415 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949409897 +0000 UTC m=+22.643106727 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449444 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949438388 +0000 UTC m=+22.643135218 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449458 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949452468 +0000 UTC m=+22.643149298 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449473 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949467838 +0000 UTC m=+22.643164668 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449493 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-v45vm\" (UniqueName: \"kubernetes.io/projected/aa90b3c2-febd-4588-a063-7fbbe82f00c1-kube-api-access-v45vm\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449521 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949500099 +0000 UTC m=+22.643196979 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449543 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.94953536 +0000 UTC m=+22.643232350 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449563 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949554491 +0000 UTC m=+22.643251461 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449577 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949571131 +0000 UTC m=+22.643268091 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449580 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449592 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949584732 +0000 UTC m=+22.643281562 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449606 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949599352 +0000 UTC m=+22.643296302 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449629 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449648 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949632873 +0000 UTC m=+22.643329743 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449678 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949665014 +0000 UTC m=+22.643361884 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449710 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449725 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-etc-kubernetes\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449777 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949765496 +0000 UTC m=+22.643462326 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449795 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949789217 +0000 UTC m=+22.643486037 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449782 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f9495\" (UniqueName: \"kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449828 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.449844 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.949831938 +0000 UTC m=+22.643528808 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449865 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-system-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449971 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.449996 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8svnk\" (UniqueName: \"kubernetes.io/projected/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-kube-api-access-8svnk\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450031 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450052 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450095 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450102 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450125 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.950118856 +0000 UTC m=+22.643815686 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450137 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-bound-sa-token\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450154 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.950141806 +0000 UTC m=+22.643838646 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450312 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-multus-daemon-config\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450344 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-env-overrides\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450371 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450431 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450454 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.950447855 +0000 UTC m=+22.644144685 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.450618 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.950611879 +0000 UTC m=+22.644308709 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450644 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-metrics-certs\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450667 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450689 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450709 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450730 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450750 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/13045510-8717-4a71-ade4-be95a76440a7-kube-api-access-dtjml\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " 
pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450769 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cc291782-27d2-4a74-af79-c7dcb31535d2-metrics-tls\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450795 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450815 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450823 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/410cf605-1970-4691-9c95-53fdc123b1f3-env-overrides\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.450838 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451116 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451175 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bwvjb\" (UniqueName: \"kubernetes.io/projected/120b38dc-8236-4fa6-a452-642b8ad738ee-kube-api-access-bwvjb\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451269 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451346 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451355 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451387 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.951376129 +0000 UTC m=+22.645072969 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451442 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-bound-sa-token\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451477 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451506 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451533 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" 
(UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451591 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451602 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451620 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451629 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.951622776 +0000 UTC m=+22.645319606 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451659 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451699 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451736 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-socket-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451766 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: 
\"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451793 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.451823 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-ovnkube-identity-cm\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451865 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451927 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.951909674 +0000 UTC m=+22.645606544 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.451305 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.452000 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.951980946 +0000 UTC m=+22.645677896 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.452019 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.452047 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.952039927 +0000 UTC m=+22.645736857 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.452059 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.452080 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.452133 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.452205 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/410cf605-1970-4691-9c95-53fdc123b1f3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.452264 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-system-cni-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.452313 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.456164 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.456190 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 
15:25:22.456257 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd556935-a077-45df-ba3f-d42c39326ccd-tmpfs\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.456509 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9d0dcce3-d96e-48cb-9b9f-362105911589-proxy-tls\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.456564 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.456880 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.457722 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa90b3c2-febd-4588-a063-7fbbe82f00c1-metrics-certs\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.461280 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cc291782-27d2-4a74-af79-c7dcb31535d2-metrics-tls\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461417 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461430 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461440 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461475 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.96146326 +0000 UTC m=+22.655160090 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.461539 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461542 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461589 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461601 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461608 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461633 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.961626024 +0000 UTC m=+22.655322854 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461651 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.961638724 +0000 UTC m=+22.655335644 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.461719 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [openshift-apiserver-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e2c5a70fe7e9b625f5ef26f458c54d20eb41da9ac60e96442f3a33dacfae5ce\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2da14cea2e9328cb16f7e4d671c9f21a6d2615667035cc26a4e4d0f634f80b82\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:37:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:36:14Z\\\"}},\\\"name\\\":\\\"openshift-apiserver-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-apiserver-operator\"/\"openshift-apiserver-operator-7c88c4c865-kn67m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461720 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461751 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461758 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461780 3552 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.961772788 +0000 UTC m=+22.655469618 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461844 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.461867 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.96186167 +0000 UTC m=+22.655558500 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.463949 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.963916815 +0000 UTC m=+22.657613705 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464027 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464049 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464067 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464128 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.964112021 +0000 UTC m=+22.657808941 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464192 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464210 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464223 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.464266 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.964253164 +0000 UTC m=+22.657950084 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.466806 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.466822 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.466830 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.466860 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.966850984 +0000 UTC m=+22.660547814 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.466984 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.467010 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.967003788 +0000 UTC m=+22.660700618 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.467057 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.467067 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.467074 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.467095 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.96708944 +0000 UTC m=+22.660786270 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.467634 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.468023 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x6dp\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-kube-api-access-9x6dp\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.468837 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.473534 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb762d1-812f-43f1-9eac-68034c1ecec7-service-ca\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " 
pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474497 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474552 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j4qn7\" (UniqueName: \"kubernetes.io/projected/2b6d14a5-ca00-40c7-af7a-051a98a24eed-kube-api-access-j4qn7\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474575 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474612 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474636 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474658 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474694 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474717 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474737 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-cni-binary-copy\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474775 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474798 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6z2n9\" (UniqueName: \"kubernetes.io/projected/bf1a8b70-3856-486f-9912-a2de1d57c3fb-kube-api-access-6z2n9\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474819 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474856 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb762d1-812f-43f1-9eac-68034c1ecec7-serving-cert\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474877 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474897 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474933 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-os-release\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.474953 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:22 
crc kubenswrapper[3552]: I0320 15:25:22.474974 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475010 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475032 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-cx4f9\" (UniqueName: \"kubernetes.io/projected/410cf605-1970-4691-9c95-53fdc123b1f3-kube-api-access-cx4f9\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475054 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475091 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d7jw8\" (UniqueName: \"kubernetes.io/projected/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-kube-api-access-d7jw8\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475113 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475132 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-dir\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475176 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475200 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: 
\"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.475880 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkzjk\" (UniqueName: \"kubernetes.io/projected/9d0dcce3-d96e-48cb-9b9f-362105911589-kube-api-access-xkzjk\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.476161 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/410cf605-1970-4691-9c95-53fdc123b1f3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476478 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476495 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476535 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.976521563 +0000 UTC m=+22.670218473 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476577 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476591 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476601 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476661 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.976651016 +0000 UTC m=+22.670347946 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476700 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476714 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476729 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.476750 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-machine-approver-tls\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476763 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.976751559 +0000 UTC m=+22.670448499 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476839 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476855 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476864 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.476896 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.976886093 +0000 UTC m=+22.670583033 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.478706 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.478764 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.978747483 +0000 UTC m=+22.672444323 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.478823 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.478865 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.978856375 +0000 UTC m=+22.672553215 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.478964 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.478996 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.978987739 +0000 UTC m=+22.672684579 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.479237 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.479272 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.979262546 +0000 UTC m=+22.672959386 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.479375 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.479412 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.97939837 +0000 UTC m=+22.673095210 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.479489 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.479518 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.979510613 +0000 UTC m=+22.673207453 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.480474 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/51a02bbf-2d40-4f84-868a-d399ea18a846-ovnkube-identity-cm\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.480536 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.480560 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.480563 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.980554851 +0000 UTC m=+22.674251681 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.480621 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.980612533 +0000 UTC m=+22.674309363 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.480642 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.480669 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.980662234 +0000 UTC m=+22.674359064 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.482187 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.493932 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bf1a8b70-3856-486f-9912-a2de1d57c3fb-node-bootstrap-token\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.494092 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-auth-proxy-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.494128 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.494171 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-mountpoint-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.494192 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-kubelet\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 
15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.494542 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/475321a1-8b7e-4033-8f72-b05a8b377347-cni-binary-copy\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.494645 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.494804 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.494826 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.494838 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.494878 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:22.994866394 +0000 UTC m=+22.688563224 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.499004 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.499150 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.499236 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.500311 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjg2w\" (UniqueName: \"kubernetes.io/projected/51a02bbf-2d40-4f84-868a-d399ea18a846-kube-api-access-zjg2w\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.500399 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-auth-proxy-config\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.500724 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51a02bbf-2d40-4f84-868a-d399ea18a846-webhook-cert\") pod \"network-node-identity-7xghp\" (UID: \"51a02bbf-2d40-4f84-868a-d399ea18a846\") " pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.500744 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qr9t\" (UniqueName: \"kubernetes.io/projected/ec1bae8b-3200-4ad9-b33b-cf8701f3027c-kube-api-access-4qr9t\") pod \"machine-approver-7874c8775-kh4j9\" (UID: \"ec1bae8b-3200-4ad9-b33b-cf8701f3027c\") " pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.501131 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb762d1-812f-43f1-9eac-68034c1ecec7-kube-api-access\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.501233 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwbqm\" (UniqueName: \"kubernetes.io/projected/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-kube-api-access-bwbqm\") pod 
\"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.501799 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.501814 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.501825 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.501964 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.501976 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.501984 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.505002 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtgqn\" (UniqueName: \"kubernetes.io/projected/297ab9b6-2186-4d5b-a952-2bfd59af63c4-kube-api-access-vtgqn\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.505679 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb762d1-812f-43f1-9eac-68034c1ecec7-serving-cert\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.505851 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.505904 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.505919 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.505934 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.505964 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.505978 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506028 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506089 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506097 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506411 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506502 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506555 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506931 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.506956 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507010 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object 
"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507031 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507093 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507103 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507112 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507426 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507513 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507578 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.507649 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-v45vm\" (UniqueName: \"kubernetes.io/projected/aa90b3c2-febd-4588-a063-7fbbe82f00c1-kube-api-access-v45vm\") pod \"router-default-5c9bf7bc58-6jctv\" (UID: \"aa90b3c2-febd-4588-a063-7fbbe82f00c1\") " pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507776 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507787 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507812 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507826 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object 
"openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507820 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507854 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507919 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507937 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507943 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507944 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507955 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.507963 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508302 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008208482 +0000 UTC m=+22.701905312 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508340 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008331505 +0000 UTC m=+22.702028335 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508357 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008350505 +0000 UTC m=+22.702047335 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508374 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008364106 +0000 UTC m=+22.702060936 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508390 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008383976 +0000 UTC m=+22.702080806 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508414 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008397447 +0000 UTC m=+22.702094277 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508431 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008422627 +0000 UTC m=+22.702119457 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508447 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008441078 +0000 UTC m=+22.702137908 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508461 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008455058 +0000 UTC m=+22.702151888 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508478 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008471959 +0000 UTC m=+22.702168789 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508492 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008485389 +0000 UTC m=+22.702182219 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508584 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008548671 +0000 UTC m=+22.702245501 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508604 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008595202 +0000 UTC m=+22.702292032 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508617 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008611452 +0000 UTC m=+22.702308272 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.508754 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.008732816 +0000 UTC m=+22.702429656 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.510338 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-bound-sa-token\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.510643 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2f8t\" (UniqueName: \"kubernetes.io/projected/475321a1-8b7e-4033-8f72-b05a8b377347-kube-api-access-c2f8t\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.511205 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkkfv\" (UniqueName: \"kubernetes.io/projected/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-kube-api-access-rkkfv\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.512246 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b54e8941-2fc4-432a-9e51-39684df9089e-bound-sa-token\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.512806 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.512847 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.512879 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object 
"openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.512965 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.012924138 +0000 UTC m=+22.706620968 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.513058 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9495\" (UniqueName: \"kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.513541 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsxd9\" (UniqueName: \"kubernetes.io/projected/6a23c0ee-5648-448c-b772-83dced2891ce-kube-api-access-gsxd9\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.514695 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sfhc\" (UniqueName: \"kubernetes.io/projected/cc291782-27d2-4a74-af79-c7dcb31535d2-kube-api-access-4sfhc\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.514763 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8svnk\" (UniqueName: \"kubernetes.io/projected/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-kube-api-access-8svnk\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.516107 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwvjb\" (UniqueName: \"kubernetes.io/projected/120b38dc-8236-4fa6-a452-642b8ad738ee-kube-api-access-bwvjb\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.516123 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.516345 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-7xghp" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.517578 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.518927 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.518946 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.518956 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.518990 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.01897835 +0000 UTC m=+22.712675170 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.526580 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.529433 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podec1bae8b_3200_4ad9_b33b_cf8701f3027c.slice/crio-5d42b3cea0bcd6f2be59c256989ebe77bcc7f51031376dfc2cc179efd73aae6a WatchSource:0}: Error finding container 5d42b3cea0bcd6f2be59c256989ebe77bcc7f51031376dfc2cc179efd73aae6a: Status 404 returned error can't find the container with id 5d42b3cea0bcd6f2be59c256989ebe77bcc7f51031376dfc2cc179efd73aae6a Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.530663 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51a02bbf_2d40_4f84_868a_d399ea18a846.slice/crio-708ce9360347d8468da389477be5a3fe0bdb117459d726d14ae2865f3c638b1a WatchSource:0}: Error finding container 708ce9360347d8468da389477be5a3fe0bdb117459d726d14ae2865f3c638b1a: Status 404 returned error can't find the container with id 708ce9360347d8468da389477be5a3fe0bdb117459d726d14ae2865f3c638b1a Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.532065 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.532093 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.532105 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.532158 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.032141023 +0000 UTC m=+22.725837853 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.538516 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa90b3c2_febd_4588_a063_7fbbe82f00c1.slice/crio-f09d4e8d1edd621465cac77fd5904b29a5028a14831fa66b0a8c9e66eed6fc76 WatchSource:0}: Error finding container f09d4e8d1edd621465cac77fd5904b29a5028a14831fa66b0a8c9e66eed6fc76: Status 404 returned error can't find the container with id f09d4e8d1edd621465cac77fd5904b29a5028a14831fa66b0a8c9e66eed6fc76 Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.547601 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"f09d4e8d1edd621465cac77fd5904b29a5028a14831fa66b0a8c9e66eed6fc76"} Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.548963 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"708ce9360347d8468da389477be5a3fe0bdb117459d726d14ae2865f3c638b1a"} Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.551869 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"5d42b3cea0bcd6f2be59c256989ebe77bcc7f51031376dfc2cc179efd73aae6a"} Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.559087 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/13045510-8717-4a71-ade4-be95a76440a7-kube-api-access-dtjml\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.572364 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.572556 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.572643 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.572777 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.07275355 +0000 UTC m=+22.766450380 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601078 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2b6d14a5-ca00-40c7-af7a-051a98a24eed-host-slash\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601195 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601258 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601288 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601315 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-plugins-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601386 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601487 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-host\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601570 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-ssl-certs\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601599 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-os-release\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601645 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601684 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6a23c0ee-5648-448c-b772-83dced2891ce-hosts-file\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601787 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601879 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601915 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601941 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-multus\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.601973 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2b6d14a5-ca00-40c7-af7a-051a98a24eed-host-slash\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602009 3552 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602047 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-multus\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602061 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-socket-dir-parent\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602085 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-host\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602117 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-ssl-certs\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602125 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-etc-kubernetes\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602165 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-system-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602263 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602287 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-os-release\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602326 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602343 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602381 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6a23c0ee-5648-448c-b772-83dced2891ce-hosts-file\") pod \"node-resolver-dn27q\" (UID: \"6a23c0ee-5648-448c-b772-83dced2891ce\") " pod="openshift-dns/node-resolver-dn27q" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602406 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-socket-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602447 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602485 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9fb762d1-812f-43f1-9eac-68034c1ecec7-etc-cvo-updatepayloads\") pod \"cluster-version-operator-6d5d9649f6-x6d46\" (UID: \"9fb762d1-812f-43f1-9eac-68034c1ecec7\") " pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602518 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-etc-kubernetes\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602547 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-system-cni-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602700 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602792 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-socket-dir-parent\") pod \"multus-q88th\" (UID: 
\"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602823 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-socket-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602831 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602852 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-os-release\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602888 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602900 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-os-release\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602912 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602926 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602959 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-plugins-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.602992 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-dir\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603075 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-mountpoint-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603099 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-kubelet\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603103 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-dir\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603138 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-system-cni-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603150 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603229 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603243 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-system-cni-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603257 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603265 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-kubelet\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603439 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-mountpoint-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603490 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.603548 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604173 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-k8s-cni-cncf-io\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604265 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/cc291782-27d2-4a74-af79-c7dcb31535d2-host-etc-kube\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604382 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-k8s-cni-cncf-io\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604410 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-dir\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604466 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/cc291782-27d2-4a74-af79-c7dcb31535d2-host-etc-kube\") pod \"network-operator-767c585db5-zd56b\" (UID: \"cc291782-27d2-4a74-af79-c7dcb31535d2\") " pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604486 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-dir\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604564 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit-dir\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604641 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-hostroot\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604647 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604697 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit-dir\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604710 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cnibin\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604720 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-hostroot\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604811 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-multus-certs\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604846 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8-cnibin\") pod \"multus-additional-cni-plugins-bzj2p\" (UID: \"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8\") " pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604863 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-netns\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604885 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-multus-certs\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " 
pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604913 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604913 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-run-netns\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604951 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604963 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.604997 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-node-pullsecrets\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605040 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605063 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bab054c9-6c83-40ee-896d-6459b22a6b4b-node-pullsecrets\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605095 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-conf-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605174 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-multus-conf-dir\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 
15:25:22.605184 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-cnibin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605233 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-cnibin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605277 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-bin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605393 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-registration-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605491 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/475321a1-8b7e-4033-8f72-b05a8b377347-host-var-lib-cni-bin\") pod \"multus-q88th\" (UID: \"475321a1-8b7e-4033-8f72-b05a8b377347\") " pod="openshift-multus/multus-q88th" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605575 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-csi-data-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605616 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9d0dcce3-d96e-48cb-9b9f-362105911589-rootfs\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.605665 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.606008 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: 
\"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-csi-data-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.606049 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/12e733dd-0939-4f1b-9cbb-13897e093787-registration-dir\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.606088 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.606108 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9d0dcce3-d96e-48cb-9b9f-362105911589-rootfs\") pod \"machine-config-daemon-zpnhg\" (UID: \"9d0dcce3-d96e-48cb-9b9f-362105911589\") " pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.606127 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"ovnkube-node-44qcg\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.618957 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-767c585db5-zd56b" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.619307 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.619368 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.619384 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.619475 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.119454031 +0000 UTC m=+22.813150881 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.636164 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc291782_27d2_4a74_af79_c7dcb31535d2.slice/crio-881804007e6970459ad7c78428155b5f8215d7006708a3b582229c2b06e3b059 WatchSource:0}: Error finding container 881804007e6970459ad7c78428155b5f8215d7006708a3b582229c2b06e3b059: Status 404 returned error can't find the container with id 881804007e6970459ad7c78428155b5f8215d7006708a3b582229c2b06e3b059 Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.640459 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.641870 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx4f9\" (UniqueName: \"kubernetes.io/projected/410cf605-1970-4691-9c95-53fdc123b1f3-kube-api-access-cx4f9\") pod \"ovnkube-control-plane-77c846df58-6l97b\" (UID: \"410cf605-1970-4691-9c95-53fdc123b1f3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.653018 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.653059 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.653073 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.653141 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.153120583 +0000 UTC m=+22.846817433 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.662588 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7dbadf0a_ba02_47d6_96a9_0995c1e8e4a8.slice/crio-2bdeec7ccf8809bb62bcad294e740fd2182de29e70e147015e3606a3819ea857 WatchSource:0}: Error finding container 2bdeec7ccf8809bb62bcad294e740fd2182de29e70e147015e3606a3819ea857: Status 404 returned error can't find the container with id 2bdeec7ccf8809bb62bcad294e740fd2182de29e70e147015e3606a3819ea857 Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.673612 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.673652 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.673666 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.673743 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.173720405 +0000 UTC m=+22.867417255 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.698060 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.720265 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4qn7\" (UniqueName: \"kubernetes.io/projected/2b6d14a5-ca00-40c7-af7a-051a98a24eed-kube-api-access-j4qn7\") pod \"iptables-alerter-wwpnd\" (UID: \"2b6d14a5-ca00-40c7-af7a-051a98a24eed\") " pod="openshift-network-operator/iptables-alerter-wwpnd" Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.731968 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.732006 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.732021 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.732093 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.232070737 +0000 UTC m=+22.925767567 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.763297 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z2n9\" (UniqueName: \"kubernetes.io/projected/bf1a8b70-3856-486f-9912-a2de1d57c3fb-kube-api-access-6z2n9\") pod \"machine-config-server-v65wr\" (UID: \"bf1a8b70-3856-486f-9912-a2de1d57c3fb\") " pod="openshift-machine-config-operator/machine-config-server-v65wr"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.771236 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.771273 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.771287 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.771355 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.271333359 +0000 UTC m=+22.965030219 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.794481 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7jw8\" (UniqueName: \"kubernetes.io/projected/f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e-kube-api-access-d7jw8\") pod \"node-ca-l92hr\" (UID: \"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\") " pod="openshift-image-registry/node-ca-l92hr"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.805586 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-wwpnd"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.809271 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-q88th"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.812521 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.812591 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.812612 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.812621 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.812667 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.312650856 +0000 UTC m=+23.006347686 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.813052 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.814807 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.816762 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-dn27q"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.832711 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.832742 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.832754 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.832805 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.332788575 +0000 UTC m=+23.026485405 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.834236 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.837672 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b6d14a5_ca00_40c7_af7a_051a98a24eed.slice/crio-ba639396073e86472ccf20204bcb6999463b510c265a179e2e7d7962912cb142 WatchSource:0}: Error finding container ba639396073e86472ccf20204bcb6999463b510c265a179e2e7d7962912cb142: Status 404 returned error can't find the container with id ba639396073e86472ccf20204bcb6999463b510c265a179e2e7d7962912cb142
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.844743 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-v65wr"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.846319 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-l92hr"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.853850 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.853887 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.853901 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.853966 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.353944682 +0000 UTC m=+23.047641512 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.868278 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b54e8941-2fc4-432a-9e51-39684df9089e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-image-registry-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d97bc5ceeb803fbb8b6f82967607071bcbcf0540932be1b9f59fc5e29e8c646d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://51a0ed51573e54783eaec0b562a7d00b746823f3e4730c5f84cee47fb9d258c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:38:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:35:58Z\\\"}},\\\"name\\\":\\\"cluster-image-registry-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-image-registry\"/\"cluster-image-registry-operator-7769bd8d7d-q5cvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.869228 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fb762d1_812f_43f1_9eac_68034c1ecec7.slice/crio-7fd27464c917a94294d43440d5dbc544723227f8a5445b56b0e4ae0144a433da WatchSource:0}: Error finding container 7fd27464c917a94294d43440d5dbc544723227f8a5445b56b0e4ae0144a433da: Status 404 returned error can't find the container with id 7fd27464c917a94294d43440d5dbc544723227f8a5445b56b0e4ae0144a433da
Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.877406 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a23c0ee_5648_448c_b772_83dced2891ce.slice/crio-4bca30021a3aa17909651fbecc0fdb2e2e9d314432b8d8f9bbf057363029ee16 WatchSource:0}: Error finding container 4bca30021a3aa17909651fbecc0fdb2e2e9d314432b8d8f9bbf057363029ee16: Status 404 returned error can't find the container with id 4bca30021a3aa17909651fbecc0fdb2e2e9d314432b8d8f9bbf057363029ee16
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.907192 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cc389b05ef555b742646390ef180ad25a8f5111c68fec6df1cfa1c6c492e98da\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qdfr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.913078 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.913221 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.913511 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.913575 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.913622 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.913632 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.913580599 +0000 UTC m=+23.607277439 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.913647 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.913687 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.913669471 +0000 UTC m=+23.607366301 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.916478 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.913724243 +0000 UTC m=+23.607421143 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.916630 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.916712 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.916757 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.916836 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.916897 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.916907 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.916956 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.916937829 +0000 UTC m=+23.610634659 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.916971 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.917050 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.916977 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.91697005 +0000 UTC m=+23.610666880 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.917134 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.917098843 +0000 UTC m=+23.610795743 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: E0320 15:25:22.917156 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:23.917147804 +0000 UTC m=+23.610844704 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.919821 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf1a8b70_3856_486f_9912_a2de1d57c3fb.slice/crio-81074656c8c02ba9292f84bdc9d342fcfd5c6ed7187e0512d6d49618fb3a0793 WatchSource:0}: Error finding container 81074656c8c02ba9292f84bdc9d342fcfd5c6ed7187e0512d6d49618fb3a0793: Status 404 returned error can't find the container with id 81074656c8c02ba9292f84bdc9d342fcfd5c6ed7187e0512d6d49618fb3a0793
Mar 20 15:25:22 crc kubenswrapper[3552]: W0320 15:25:22.920493 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d0dcce3_d96e_48cb_9b9f_362105911589.slice/crio-1f02874fa1b8322bac5028d683028d2f63c7de5eb3193a44096ebc102a466664 WatchSource:0}: Error finding container 1f02874fa1b8322bac5028d683028d2f63c7de5eb3193a44096ebc102a466664: Status 404 returned error can't find the container with id 1f02874fa1b8322bac5028d683028d2f63c7de5eb3193a44096ebc102a466664
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.948163 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [console-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0e8f29122aea315d5914a7a44fb2b651ebb1927330eedafd6e148dee989e5e6b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39ebdae230cdbbdee5d4d29f2b22052e045fb4e9bdb1ddc921c70774a4858df5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:40:14Z\\\",\\\"message\\\":\\\"I0312 13:39:52.195626 1 cmd.go:241] Using service-serving-cert provided certificates\\\\nI0312 13:39:52.196046 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:39:52.206633 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:39:52.254076 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://10.217.4.1:443/api/v1/namespaces/openshift-console-operator/pods/console-operator-5dbbc74dc9-cp5cd\\\\\\\": dial tcp 10.217.4.1:443: connect: connection refused\\\\nI0312 13:39:52.274753 1 builder.go:299] console-operator version -\\\\nI0312 13:40:14.137065 1 cmd.go:129] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF0312 13:40:14.137981 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-console-operator/leases/console-operator-lock\\\\\\\": dial tcp 10.217.4.1:443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:39:51Z\\\"}},\\\"name\\\":\\\"console-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console-operator\"/\"console-operator-5dbbc74dc9-cp5cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:22 crc kubenswrapper[3552]: I0320 15:25:22.986454 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-image-registry/node-ca-l92hr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52096a0ef4d0f0ac66bf0d6c0924464d59aee852f6e83195b7c1608de4a289b8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-image-registry\"/\"node-ca-l92hr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019012 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019076 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019108 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019128 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019152 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019174 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019195 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019222 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019256 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019282 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019307 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019330 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019351 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019373 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019395 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019437 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019461 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019492 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019523 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019552 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019573 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019594 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019616 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019640 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019680 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019702 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019725 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019762 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019786 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019807 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019864 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019885 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019916 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019935 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019958 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.019981 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020022 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020042 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020083 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020105 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020133 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020156 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020201 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020233 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020256 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020292 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020312 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020332 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020358 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020378 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020401 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020438 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020458 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020481 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020500 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020521 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020544 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020583 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020604 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020626 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020647 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020668 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020688 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020708 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020731 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020754 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020774 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020803 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020823 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020844 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020865 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020884 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020903 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020923 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020944 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020964 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.020983 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021004 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021025 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021045 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021066 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021088 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021109 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021130 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021150 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021170 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021191 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021212 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021231 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23
crc kubenswrapper[3552]: I0320 15:25:23.021256 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021275 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021296 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021319 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021340 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021360 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021379 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021398 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021435 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021456 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021478 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021502 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021522 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021542 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021562 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021582 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021603 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021623 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021643 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021662 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021687 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021707 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021729 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021753 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021774 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021794 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021815 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021835 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021854 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.021874 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.021967 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022009 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.021996153 +0000 UTC m=+23.715692983 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022201 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022195178 +0000 UTC m=+23.715892008 (durationBeforeRetry 1s). 
Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022255 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022267 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022297 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.0222898 +0000 UTC m=+23.715986630 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022333 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022352 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022346602 +0000 UTC m=+23.716043432 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022385 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022410 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022399953 +0000 UTC m=+23.716096783 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022460 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022479 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022473885 +0000 UTC m=+23.716170715 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022511 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022532 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022526407 +0000 UTC m=+23.716223237 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022561 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022579 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022574078 +0000 UTC m=+23.716270908 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022616 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022628 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022635 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022656 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.0226507 +0000 UTC m=+23.716347520 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022693 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022702 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022708 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022727 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022721772 +0000 UTC m=+23.716418602 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022769 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022798 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022789674 +0000 UTC m=+23.716486514 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022849 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022858 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022865 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022884 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.022878716 +0000 UTC m=+23.716575546 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022914 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022931 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:24.022926738 +0000 UTC m=+23.716623568 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022979 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022988 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.022994 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023014 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.02300736 +0000 UTC m=+23.716704190 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023060 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023070 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023075 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023093 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.023088082 +0000 UTC m=+23.716784912 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023131 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023140 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023146 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023164 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.023159054 +0000 UTC m=+23.716855874 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023195 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023215 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.023209105 +0000 UTC m=+23.716905925 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023452 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023482 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.023475052 +0000 UTC m=+23.717171882 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023519 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023539 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.023533244 +0000 UTC m=+23.717230074 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023578 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023588 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023621 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023637 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023641 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023679 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023731 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023738 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023784 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023803 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023813 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023823 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023862 3552 configmap.go:199] Couldn't get configMap 
openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023876 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023917 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023936 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023947 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023953 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023981 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023999 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024047 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024077 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024107 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024118 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024124 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024159 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024168 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: 
E0320 15:25:23.024197 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024210 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024273 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024285 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024318 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024339 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024349 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024356 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024360 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024401 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024406 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024438 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024446 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024462 3552 secret.go:194] Couldn't get secret 
openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024492 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024517 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024526 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024533 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024559 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024581 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024602 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024629 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024636 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024663 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024673 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024678 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024715 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024725 3552 projected.go:294] Couldn't get configMap 
openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024732 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024750 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024767 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024775 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024782 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024847 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024888 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024954 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024969 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024977 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024985 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025048 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025065 3552 projected.go:294] Couldn't 
get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025075 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025079 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.025123 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/revision-pruner-7-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67b26a02-5c35-4df6-a4b9-c47c6dbf5c9f\\\"},\\\"status\\\":{\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler\"/\"revision-pruner-7-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025170 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025198 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025233 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025240 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.023596 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.023591095 +0000 UTC m=+23.717287925 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025265 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025290 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:24.02527141 +0000 UTC m=+23.718968240 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025296 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025305 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025325 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025332 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025309 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.025297711 +0000 UTC m=+23.718994531 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025348 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024534 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025357 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.025345722 +0000 UTC m=+23.719042552 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025367 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025395 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024169 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025479 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025500 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025506 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025530 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025591 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025595 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025618 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025823 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025833 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025847 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object 
"openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025850 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025871 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025609 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025881 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025888 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025905 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025409 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024959 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026015 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026029 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025173 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024051 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025334 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object 
"openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025600 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025632 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025485 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025374 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.024125 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025378 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.025366003 +0000 UTC m=+23.719062833 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026555 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026537184 +0000 UTC m=+23.720234074 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026570 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026562245 +0000 UTC m=+23.720259075 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026583 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026577005 +0000 UTC m=+23.720273825 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026596 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026590066 +0000 UTC m=+23.720286896 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026609 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026603096 +0000 UTC m=+23.720299926 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026621 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026614656 +0000 UTC m=+23.720311576 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026636 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026629807 +0000 UTC m=+23.720326637 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026649 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026643207 +0000 UTC m=+23.720340037 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026663 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026656647 +0000 UTC m=+23.720353477 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026675 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026669438 +0000 UTC m=+23.720366378 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026689 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026681578 +0000 UTC m=+23.720378408 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026701 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026694998 +0000 UTC m=+23.720391828 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026713 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026707199 +0000 UTC m=+23.720404029 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026725 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026719429 +0000 UTC m=+23.720416259 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026738 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026732569 +0000 UTC m=+23.720429399 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026751 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.02674518 +0000 UTC m=+23.720442010 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026763 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.02675685 +0000 UTC m=+23.720453670 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026776 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.02676933 +0000 UTC m=+23.720466170 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026792 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026784621 +0000 UTC m=+23.720481451 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026805 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026799561 +0000 UTC m=+23.720496391 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026817 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026811442 +0000 UTC m=+23.720508262 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026830 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026823832 +0000 UTC m=+23.720520782 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026843 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026836372 +0000 UTC m=+23.720533202 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026855 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026849053 +0000 UTC m=+23.720545883 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026867 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026861183 +0000 UTC m=+23.720558013 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026881 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026873693 +0000 UTC m=+23.720570513 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026894 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026887324 +0000 UTC m=+23.720584154 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026906 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026900904 +0000 UTC m=+23.720597734 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026918 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026912454 +0000 UTC m=+23.720609284 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026931 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026924805 +0000 UTC m=+23.720621635 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026945 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:24.026937475 +0000 UTC m=+23.720634305 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026961 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026952985 +0000 UTC m=+23.720649915 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026977 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026968266 +0000 UTC m=+23.720665196 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.026992 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026984176 +0000 UTC m=+23.720681106 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027006 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.026998497 +0000 UTC m=+23.720695437 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027022 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027013437 +0000 UTC m=+23.720710377 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027039 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027029877 +0000 UTC m=+23.720726817 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027054 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027046688 +0000 UTC m=+23.720743618 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027069 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027061598 +0000 UTC m=+23.720758518 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027085 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027076469 +0000 UTC m=+23.720773389 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027142 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027097549 +0000 UTC m=+23.720794479 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027159 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027151691 +0000 UTC m=+23.720848521 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027173 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027165861 +0000 UTC m=+23.720862691 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027185 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027179781 +0000 UTC m=+23.720876611 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027199 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027192412 +0000 UTC m=+23.720889332 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027215 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027206592 +0000 UTC m=+23.720903522 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027232 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027223313 +0000 UTC m=+23.720920233 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027248 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027240343 +0000 UTC m=+23.720937283 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027261 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027255033 +0000 UTC m=+23.720951963 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027276 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027268564 +0000 UTC m=+23.720965404 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027289 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027282834 +0000 UTC m=+23.720979764 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025422 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027304 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027295764 +0000 UTC m=+23.720992694 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027323 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027314745 +0000 UTC m=+23.721011675 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025452 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027340 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027330835 +0000 UTC m=+23.721027755 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025450 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027365 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027357576 +0000 UTC m=+23.721054526 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025612 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027386 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027373507 +0000 UTC m=+23.721070337 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025641 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025667 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025677 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025677 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027488 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027495 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025680 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027522 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027529 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025688 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025727 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025740 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025741 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 
15:25:23.025747 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025741 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025758 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025757 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025756 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025759 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025763 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025782 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027810 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027819 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025779 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025789 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025801 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025805 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.025813 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027405 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027393397 +0000 UTC m=+23.721090217 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027949 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027938692 +0000 UTC m=+23.721635512 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027962 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027955422 +0000 UTC m=+23.721652252 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027974 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027968043 +0000 UTC m=+23.721664873 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027983 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027978873 +0000 UTC m=+23.721675703 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.027994 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027988453 +0000 UTC m=+23.721685283 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028005 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.027999793 +0000 UTC m=+23.721696623 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028029 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028019484 +0000 UTC m=+23.721716314 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028043 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028035794 +0000 UTC m=+23.721732744 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028058 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028050645 +0000 UTC m=+23.721747585 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028072 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028064285 +0000 UTC m=+23.721761235 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028091 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028083936 +0000 UTC m=+23.721780886 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028105 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028097676 +0000 UTC m=+23.721794616 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
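Each nestedpendingoperations.go entry records the kubelet's retry gate for a failed mount: the operation is parked and not retried before the printed deadline, with the delay growing on repeated failures (here still at the initial durationBeforeRetry of 1s). A rough sketch of that pattern using apimachinery's wait helpers; this is an illustration of the backoff idea, not the kubelet's actual nestedpendingoperations code, and the stand-in mountVolume function is invented for the example:

// backoff.go: retry-with-exponential-backoff in the style of the
// "No retries permitted until ... (durationBeforeRetry 1s)" lines above.
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// mountVolume stands in for MountVolume.SetUp; it fails until the
// objects it needs appear in the kubelet's cache.
func mountVolume() error {
	return errors.New(`object "openshift-console"/"console-config" not registered`)
}

func main() {
	backoff := wait.Backoff{
		Duration: time.Second, // initial delay, matching the 1s in the log
		Factor:   2.0,         // delay doubles on each consecutive failure
		Steps:    5,           // give up after five attempts in this sketch
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		if mountErr := mountVolume(); mountErr != nil {
			fmt.Println("retrying after error:", mountErr)
			return false, nil // not done; retry after the next backoff step
		}
		return true, nil // mounted successfully
	})
	fmt.Println("final:", err)
}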
Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028142 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028130647 +0000 UTC m=+23.721827587 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028160 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028153707 +0000 UTC m=+23.721850647 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028174 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028167868 +0000 UTC m=+23.721864818 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028193 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028182758 +0000 UTC m=+23.721879708 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028209 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028200239 +0000 UTC m=+23.721897169 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028223 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028215719 +0000 UTC m=+23.721912659 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028246 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.02823859 +0000 UTC m=+23.721935530 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028267 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.0282582 +0000 UTC m=+23.721955150 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028288 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028280001 +0000 UTC m=+23.721976961 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028326 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:24.028317792 +0000 UTC m=+23.722014632 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028348 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028339452 +0000 UTC m=+23.722036392 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028370 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028363973 +0000 UTC m=+23.722060803 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028394 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028383064 +0000 UTC m=+23.722079894 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028429 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028407454 +0000 UTC m=+23.722104284 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028446 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. 
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028446 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028440175 +0000 UTC m=+23.722137005 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028461 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028455866 +0000 UTC m=+23.722152806 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028477 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028471816 +0000 UTC m=+23.722168756 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028492 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028487546 +0000 UTC m=+23.722184366 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028507 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028502327 +0000 UTC m=+23.722199277 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028523 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028517527 +0000 UTC m=+23.722214347 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028537 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028532388 +0000 UTC m=+23.722229218 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028552 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028547078 +0000 UTC m=+23.722243908 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028568 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028563288 +0000 UTC m=+23.722260118 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028583 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028578429 +0000 UTC m=+23.722275259 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028597 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.028592709 +0000 UTC m=+23.722289649 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.028612 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.02860705 +0000 UTC m=+23.722303880 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.067529 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5d722a-1123-4935-9740-52a08d018bc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [serve-healthcheck-canary]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a6ead43bdb764cbbb4c3390efab755e94af49cb95729c3c5d78be72155f2cf72\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"serve-healthcheck-canary\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-ingress-canary\"/\"ingress-canary-2vhcn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.111276 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/redhat-operators-f4jkp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4092a9f8-5acc-4932-9e90-ef962eeb301a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-operators-f4jkp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.124633 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.124672 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.124684 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.124747 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.124728864 +0000 UTC m=+23.818425694 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.124950 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.125039 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.125083 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125250 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125270 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125281 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125273 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
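The kube-api-access-* volumes named in these entries are the generated projected service-account volumes: a bound token plus the kube-root-ca.crt configmap (and, on OpenShift, openshift-service-ca.crt, which is exactly the pair each error lists), so the mount cannot complete while either configmap is unavailable to the kubelet. A sketch of roughly what such a volume looks like as a core/v1 structure; the field values and item key names here are illustrative assumptions, not read back from this cluster:

// projectedvol.go: an illustrative kube-api-access-* projected volume.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiration := int64(3607) // example token lifetime in seconds
	vol := corev1.Volume{
		Name: "kube-api-access-4w8wh", // example name taken from the log
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					// The bound service-account token.
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						ExpirationSeconds: &expiration,
						Path:              "token",
					}},
					// The cluster CA bundle every namespace carries.
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					// OpenShift additionally projects the service CA.
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}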
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125312 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.125303729 +0000 UTC m=+23.819000559 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125316 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125334 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.125464 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.125383372 +0000 UTC m=+23.819080292 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.147392 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager/revision-pruner-10-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"410dbf23-e4f3-4307-910c-ad0a079c33e2\\\"},\\\"status\\\":{\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager\"/\"revision-pruner-10-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.209864 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33b7f421-18ed-4980-bd54-2fec77176e75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:03Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:04Z\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:04Z\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:01Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24b139cd037a86af7ecea0fb8c66f0be79ccc8c8cefd560303e1dacb2c54dbbc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-20T15:25:03Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f419a886a89c86b5a5411a6c6d72ee563edbed19a78990933b16803669d9662\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f419a886a89c86b5a5411a6c6d72ee563edbed19a78990933b16803669d9662\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-20T15:25:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-20T15:25:02Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-20T15:25:01Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.227779 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.227824 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228147 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228179 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228195 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228257 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.228235876 +0000 UTC m=+23.921932706 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228387 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228442 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228457 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.228530 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.228509094 +0000 UTC m=+23.922205974 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.235458 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-storage-version-migrator-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:730f5b20164dd87b074b356636cdfa4848f1159b412ccf7e09ab0c4554232730\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://690c3e41c88de001213b70a1c67447cbdf7b536c279ebd273a32e03268f91192\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:37:34Z\\\",\\\"message\\\":\\\"pback-client@1773322594\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1773322588\\\\\\\\\\\\\\\" (2026-03-12 12:36:27 +0000 UTC to 2027-03-12 12:36:27 +0000 UTC (now=2026-03-12 13:37:33.119235812 +0000 UTC))\\\\\\\"\\\\nI0312 13:37:34.434451 1 observer_polling.go:120] Observed file \\\\\\\"/var/run/secrets/serving-cert/tls.crt\\\\\\\" has been modified (old=\\\\\\\"43548186e7ce5eab21976aea3b471207a358b9f8fb63bf325b8f4755a5142ae9\\\\\\\", new=\\\\\\\"772b132d205d6ab3b67fe43634e15ca4fa0dbded77cd4f1e5c79b6a07aa87dc7\\\\\\\")\\\\nW0312 13:37:34.434997 1 builder.go:132] Restart triggered because of file /var/run/secrets/serving-cert/tls.crt was modified\\\\nI0312 13:37:34.435087 1 observer_polling.go:120] Observed file \\\\\\\"/var/run/secrets/serving-cert/tls.key\\\\\\\" has been modified (old=\\\\\\\"9e10b51cb3256c60ae44b395564462b79050e988d1626d5f34804f849a3655a7\\\\\\\", new=\\\\\\\"cfeb3635d6fe21d3ece1a467c2574c40be43b6aeedf1f44042ecfea382437bf7\\\\\\\")\\\\nI0312 13:37:34.435913 1 genericapiserver.go:681] \\\\\\\"[graceful-termination] pre-shutdown hooks completed\\\\\\\" name=\\\\\\\"PreShutdownHooksStopped\\\\\\\"\\\\nI0312 13:37:34.435965 1 genericapiserver.go:538] \\\\\\\"[graceful-termination] shutdown event\\\\\\\" name=\\\\\\\"ShutdownInitiated\\\\\\\"\\\\nI0312 13:37:34.436300 1 base_controller.go:172] Shutting down StatusSyncer_kube-storage-version-migrator ...\\\\nI0312 13:37:34.436307 1 base_controller.go:150] All StatusSyncer_kube-storage-version-migrator post start hooks have been terminated\\\\nI0312 13:37:34.436340 1 base_controller.go:172] Shutting down RemoveStaleConditionsController ...\\\\nI0312 13:37:34.436361 1 base_controller.go:172] Shutting down KubeStorageVersionMigratorStaticResources ...\\\\nI0312 13:37:34.436383 1 base_controller.go:172] Shutting down KubeStorageVersionMigrator ...\\\\nI0312 13:37:34.436402 1 base_controller.go:172] Shutting down LoggingSyncer ...\\\\nI0312 13:37:34.436426 1 base_controller.go:172] Shutting down StaticConditionsController ...\\\\nW0312 13:37:34.436628 1 builder.go:109] graceful termination failed, controllers failed with error: stopped\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:36:07Z\\\"}},\\\"name\\\":\\\"kube-storage-version-migrator-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.269206 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71af81a9-7d43-49b2-9287-c375900aa905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler-operator-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://254ad9a98529033932dc1b9c446efaa247d53e9d673f4d28116134c8c0e44635\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-03-12T13:39:44Z\\\",\\\"message\\\":\\\"I0312 13:39:14.236110 1 cmd.go:241] Using service-serving-cert provided certificates\\\\nI0312 13:39:14.236303 1 leaderelection.go:122] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI0312 13:39:14.238018 1 observer_polling.go:159] Starting file observer\\\\nW0312 13:39:14.250335 1 builder.go:267] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler-operator/pods/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\\\\\\\": dial tcp 10.217.4.1:443: connect: connection refused\\\\nI0312 13:39:14.251943 1 builder.go:299] openshift-cluster-kube-scheduler-operator version 4.16.0-202406131906.p0.g630f63b.assembly.stream.el9-630f63b-630f63bc7a30d2662bbb5115233144079de6eef6\\\\nF0312 13:39:44.693314 1 cmd.go:170] failed checking apiserver connectivity: Get \\\\\\\"https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-scheduler-operator/leases/openshift-cluster-kube-scheduler-operator-lock\\\\\\\": dial tcp 10.217.4.1:443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:39:14Z\\\"}},\\\"name\\\":\\\"kube-scheduler-operator-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.309092 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b5c38ff-1fa8-4219-994d-15776acd4a4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [etcd-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b3971f82b444869fdbecbfd54ef7a319b608fe63eef0e09d3f7a65b652ffafc3\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01904e9caa36dbc7772b537a148f3270c1b6a855aab806556aac5544f9540dc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:37:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:36:12Z\\\"}},\\\"name\\\":\\\"etcd-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-etcd-operator\"/\"etcd-operator-768d5b5d86-722mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.330382 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.330877 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331003 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331017 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331098 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.331080381 +0000 UTC m=+24.024777211 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.331152 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.331277 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331279 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331325 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331339 3552
projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331426 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.331387069 +0000 UTC m=+24.025083959 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331607 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331658 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331673 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.331758 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.331733588 +0000 UTC m=+24.025430418 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.348549 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [marketplace-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c3c2223e85e89c657ef6687dc57f1075aa0d16e5f1cccebc9f6a48911233b46\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:39:59Z\\\",\\\"message\\\":\\\"time=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=info msg=\\\\\\\"Go Version: go1.21.9 (Red Hat 1.21.9-1.el9_4) X:strictfipsruntime\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=info msg=\\\\\\\"Go OS/Arch: linux/amd64\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=info msg=\\\\\\\"[metrics] Registering marketplace metrics\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=info msg=\\\\\\\"[metrics] Serving marketplace metrics\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=info msg=\\\\\\\"TLS keys set, using https for metrics\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=warning msg=\\\\\\\"Config API is not available\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=info msg=\\\\\\\"setting up scheme\\\\\\\"\\\\ntime=\\\\\\\"2026-03-12T13:39:58Z\\\\\\\" level=fatal msg=\\\\\\\"failed to determine if *v1.ConfigMap is namespaced: failed to get restmapping: failed to get server groups: Get \\\\\\\\\\\\\\\"https://10.217.4.1:443/api\\\\\\\\\\\\\\\": dial tcp 10.217.4.1:443: connect: connection refused\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:39:52Z\\\"}},\\\"name\\\":\\\"marketplace-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"marketplace-operator-8b455464d-f9xdt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.388028 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd556935-a077-45df-ba3f-d42c39326ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [packageserver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2858d5039ccec571b6cd26627bcc15672b705846caefb817b9c8fdc52c91b2a8\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"packageserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-operator-lifecycle-manager\"/\"packageserver-8464bcc55b-sjnqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.428849 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-dns/dns-default-gbw49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13045510-8717-4a71-ade4-be95a76440a7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4100378bdad23dfbaf635cc71846262fc1e11f874ca8829d9325daa5394f31d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"dns\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:69f0ea9c4dd64fcb95e7d523331e9e46cf36132427af07bd759cbd1837eaf903\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-dns\"/\"dns-default-gbw49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430092 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430254 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430334 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430261 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430102 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430115 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430110 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430146 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430167 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430192 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430197 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430103 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.430579 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430211 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430212 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430225 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430230 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.430268 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.430653 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.430735 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.430821 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.430897 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.430997 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431061 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431191 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431390 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431602 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431701 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431800 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.431882 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.432002 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.432155 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.432251 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.432437 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.435557 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.435595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435692 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435714 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435729 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435812 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435845 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435861 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.435899 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.435875797 +0000 UTC m=+24.129572627 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.436040 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:24.4360208 +0000 UTC m=+24.129717630 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.470049 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-console/downloads-65476884b9-9wcvx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6268b7fe-8910-4505-b404-6f1df638105c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [download-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f58cff96550345ff1cbd0c3df73e478f38310996ac8a0a77006b25cc2e3351f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://903cc09b29eb6b57a4de9a646e0d2d20b91d23eac8fd8cd6470da91e14b35e89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:38:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:37:49Z\\\"}},\\\"name\\\":\\\"download-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-console\"/\"downloads-65476884b9-9wcvx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.511824 3552 status_manager.go:877] "Failed to update status for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"12e733dd-0939-4f1b-9cbb-13897e093787\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [hostpath-provisioner node-driver-registrar liveness-probe csi-provisioner]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"csi-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9:v4.13\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"hostpath-provisioner\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-livenessprobe:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"liveness-probe\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"registry.redhat.io/openshift4/ose-csi-node-driver-registrar:latest\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"node-driver-registrar\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"hostpath-provisioner\"/\"csi-hostpathplugin-hvm8g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.546566 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-marketplace/certified-operators-7287f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"887d596e-c519-4bfa-af90-3edd9e1b2f0f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"certified-operators-7287f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.557165 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"31bf7d401cea8360cb571a96460f6fb1204baf8040be63c13250f135860fc54a"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.557225 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"6ca39e978d01eda74942a426f0604e4d4f5e9ca91b0d2821a78787798a318e64"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.557237 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"1f02874fa1b8322bac5028d683028d2f63c7de5eb3193a44096ebc102a466664"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.560480 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" event={"ID":"bf1a8b70-3856-486f-9912-a2de1d57c3fb","Type":"ContainerStarted","Data":"c3713a013b819210e9df4e667972f933d2bc885c2726840ed241556e31494621"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.560524 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-v65wr" event={"ID":"bf1a8b70-3856-486f-9912-a2de1d57c3fb","Type":"ContainerStarted","Data":"81074656c8c02ba9292f84bdc9d342fcfd5c6ed7187e0512d6d49618fb3a0793"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.562346 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"d0204b88bd9af9c573be7f43532842626783d09f35557c5f209dee98bf250535"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.562389 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-7874c8775-kh4j9" event={"ID":"ec1bae8b-3200-4ad9-b33b-cf8701f3027c","Type":"ContainerStarted","Data":"776304f9c10014264768203cf8a5009374ba9a6b3c2a9a05f936d8c560be933a"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.563877 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerStarted","Data":"7ef2b86d7ef9c5f2ee243289566afef67a9480dfb2813320ebcf58fd2d098b45"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.563953 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-767c585db5-zd56b" 
event={"ID":"cc291782-27d2-4a74-af79-c7dcb31535d2","Type":"ContainerStarted","Data":"881804007e6970459ad7c78428155b5f8215d7006708a3b582229c2b06e3b059"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.565682 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"f2fc1bbb04e9f24c81dedc1ebfc1c4c37de91d1c47d106e43d82018b32541b83"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.565733 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" event={"ID":"51a02bbf-2d40-4f84-868a-d399ea18a846","Type":"ContainerStarted","Data":"e537b99d46a6e6ce01adcdef0975b64b07ae555b175089fde017722b831d9fe8"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.566925 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" exitCode=0 Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.566973 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.566992 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"8b371ed36566aceb82d5304c51128cd66dd3e2fa866c5de347ea1d53f8de7f78"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.569507 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dn27q" event={"ID":"6a23c0ee-5648-448c-b772-83dced2891ce","Type":"ContainerStarted","Data":"0d5e631997d4fab103a71b78f8897cae61300f1872e1b31226d31d9521f92359"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.569537 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dn27q" event={"ID":"6a23c0ee-5648-448c-b772-83dced2891ce","Type":"ContainerStarted","Data":"4bca30021a3aa17909651fbecc0fdb2e2e9d314432b8d8f9bbf057363029ee16"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.571901 3552 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="276365e163f3352abcbfd06f65e751d413a96e2aa3b03b9956c3508c97d88a2c" exitCode=0 Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.571924 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"276365e163f3352abcbfd06f65e751d413a96e2aa3b03b9956c3508c97d88a2c"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.571954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"2bdeec7ccf8809bb62bcad294e740fd2182de29e70e147015e3606a3819ea857"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.575011 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" 
event={"ID":"9fb762d1-812f-43f1-9eac-68034c1ecec7","Type":"ContainerStarted","Data":"b9c9986e88fa6425848beff02a582c40c3b2f0a8773d1613d7b7ad5169034053"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.575050 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-6d5d9649f6-x6d46" event={"ID":"9fb762d1-812f-43f1-9eac-68034c1ecec7","Type":"ContainerStarted","Data":"7fd27464c917a94294d43440d5dbc544723227f8a5445b56b0e4ae0144a433da"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.578134 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" event={"ID":"410cf605-1970-4691-9c95-53fdc123b1f3","Type":"ContainerStarted","Data":"f5a1db52e2c7f37dd57d85c9fce3ee3b1f5503e1bfc864c5aab015993e4642d5"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.578170 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" event={"ID":"410cf605-1970-4691-9c95-53fdc123b1f3","Type":"ContainerStarted","Data":"5d372a9adf973d44a7309f0945b9c7a52304ade13f6f818179fc50a26a42151d"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.578184 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-77c846df58-6l97b" event={"ID":"410cf605-1970-4691-9c95-53fdc123b1f3","Type":"ContainerStarted","Data":"32365406878a49b33ae70a90fb98b4b56261a521b3a94c02c407369c62a50921"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.591008 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-l92hr" event={"ID":"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e","Type":"ContainerStarted","Data":"bb5cb09c3ac4b1627e3d298370732d23035a9a5cfe1457526201b6ab8106413e"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.591061 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-l92hr" event={"ID":"f8175ef1-0983-4bfe-a64e-fc6f5c5f7d2e","Type":"ContainerStarted","Data":"42f249dd527062b0901343284aa3d41f278385c84613a364cb9582ca373b39b6"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.592360 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"f403c1698de4def7aadd43b02d3a259f8649cf04c13ee0e528df40e0215d2870"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.592420 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"23acf1fe4a3694b3e2c14e5183e33ed3bbc055216b2a0ae11440a316071b5864"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.594029 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" event={"ID":"2b6d14a5-ca00-40c7-af7a-051a98a24eed","Type":"ContainerStarted","Data":"ba639396073e86472ccf20204bcb6999463b510c265a179e2e7d7962912cb142"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.596594 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"f072010141e0432a82d0cd5bce4ef78ec3ed40c5f8bb481a2055e25005db596d"} Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.599557 3552 status_manager.go:877] "Failed to update status for pod" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c782cf62-a827-4677-b3c2-6f82c5f09cbb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [registry-server]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fd647545b1e1e8133f835b5842318c4a574964a1089d0c79e368492f43f4be0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"registry-server\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-marketplace\"/\"redhat-marketplace-8s8pc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.631318 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d309c909-80da-46ea-a83c-6c81e6cb0582\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:03Z\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-scheduler]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:01Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-scheduler]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:01Z\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://abbdc8dda3053204d33fba33de5ab914928cc5fac1c51a85b8f45f9091120785\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-20T15:25:03Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e98943f48f2887a99d0993da317df7ec972516393e9293b01fceb19059d958d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-20T15:25:03Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://03805260f31b5c88b57bb94bf816a698e58e12f3d139dd6b59c95bd55e34479c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fcfcd442b37827a1acbd2953c1e4f8103f31fec151e6666b9c5bb0045feada8f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-03-20T15:25:04Z\\\"}}}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ca87dc9857ae7133245fe1d8aed7dfd01a5e8b87834144e18106cb914792d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fc7ec6aed1866d8244774ffcad733f9395679f49cc12580f349b9c47358f842\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ca87dc9857ae7133245fe1d8aed7dfd01a5e8b87834144e18106cb914792d53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-20T15:25:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-20T15:25:02Z\\\"}}}],\\\"startTime\\\":\\\"2026-03-20T15:25:01Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.671853 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-controller-manager-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:bca1f698f7f613e9e8f2626aacc55323c6a5bd50ca26c920a042e5b8c9ab9c0f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf3cc5d384c697ede552b2e7310ef4955e8ac25586b981a98c7f89ae2248f131\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-03-12T13:37:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-03-12T13:36:00Z\\\"}},\\\"name\\\":\\\"kube-controller-manager-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-6f6cb54958-rbddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.707114 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-7xghp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"51a02bbf-2d40-4f84-868a-d399ea18a846\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16c2d2dc4a3bada4291ed55bdb20df4380cf97fabcaac157210b3eba0d84e086\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-03-12T13:38:44Z\\\",\\\"message\\\":\\\"nect: connection refused - backing off\\\\nI0312 13:38:44.128072 1 leaderelection.go:285] failed to renew lease openshift-network-node-identity/ovnkube-identity: timed out waiting for the condition\\\\nE0312 13:38:44.129968 1 leaderelection.go:308] Failed to release lock: Put \\\\\\\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-network-node-identity/leases/ovnkube-identity\\\\\\\": dial tcp 192.168.130.11:6443: connect: connection refused\\\\nI0312 13:38:44.130463 1 recorder.go:104] \\\\\\\"crc_d814d3c9-5599-4d24-a0ad-0bbb877a1efa stopped leading\\\\\\\" logger=\\\\\\\"events\\\\\\\" type=\\\\\\\"Normal\\\\\\\" object={\\\\\\\"kind\\\\\\\":\\\\\\\"Lease\\\\\\\",\\\\\\\"namespace\\\\\\\":\\\\\\\"openshift-network-node-identity\\\\\\\",\\\\\\\"name\\\\\\\":\\\\\\\"ovnkube-identity\\\\\\\",\\\\\\\"uid\\\\\\\":\\\\\\\"5bf9f7f1-1bc1-4ccd-8cd6-296722bc5f45\\\\\\\",\\\\\\\"apiVersion\\\\\\\":\\\\\\\"coordination.k8s.io/v1\\\\\\\",\\\\\\\"resourceVersion\\\\\\\":\\\\\\\"30770\\\\\\\"} reason=\\\\\\\"LeaderElection\\\\\\\"\\\\nI0312 13:38:44.130882 1 internal.go:516] \\\\\\\"Stopping and waiting for non leader election runnables\\\\\\\"\\\\nI0312 13:38:44.131055 1 internal.go:520] \\\\\\\"Stopping and waiting for leader election runnables\\\\\\\"\\\\nI0312 13:38:44.131164 1 internal.go:526] \\\\\\\"Stopping and waiting for caches\\\\\\\"\\\\nI0312 13:38:44.131319 1 internal.go:530] \\\\\\\"Stopping and waiting for webhooks\\\\\\\"\\\\nI0312 13:38:44.131349 1 internal.go:533] \\\\\\\"Stopping and waiting for HTTP servers\\\\\\\"\\\\nI0312 13:38:44.131434 1 internal.go:537] \\\\\\\"Wait completed, proceeding to shutdown the manager\\\\\\\"\\\\nI0312 13:38:44.131449 1 controller.go:240] \\\\\\\"Shutdown signal received, waiting for all workers to finish\\\\\\\" controller=\\\\\\\"certificatesigningrequest\\\\\\\" controllerGroup=\\\\\\\"certificates.k8s.io\\\\\\\" controllerKind=\\\\\\\"CertificateSigningRequest\\\\\\\"\\\\nI0312 13:38:44.131496 1 controller.go:242] \\\\\\\"All workers finished\\\\\\\" controller=\\\\\\\"certificatesigningrequest\\\\\\\" controllerGroup=\\\\\\\"certificates.k8s.io\\\\\\\" controllerKind=\\\\\\\"CertificateSigningRequest\\\\\\\"\\\\nI0312 13:38:44.131518 1 reflector.go:295] Stopping reflector *v1.CertificateSigningRequest (9h26m15.772368371s) from sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:105\\\\nerror running approver: leader election lost\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-03-12T13:27:44Z\\\"}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:528874097a1d537796a103d2482d59cbd1a4d75aebe63f802a74e22cedaa1009\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-7xghp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.748136 3552 status_manager.go:877] "Failed to update status for pod" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3992789-6f8b-4806-8ce0-261a7623ca46\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-03-20T15:25:22Z\\\",\\\"message\\\":\\\"containers with unready status: [controller-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4f852821a513f8bab2eae4047b6c603e36a7cd202001638900ca14fab436403\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"controller-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}}}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-controller-manager\"/\"controller-manager-7fdc5fd4dd-zdxlh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949019 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949137 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949164 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949186 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949235 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.949217226 +0000 UTC m=+25.642914056 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949258 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949296 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.949284427 +0000 UTC m=+25.642981257 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949263 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949346 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949363 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949373 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.94936528 +0000 UTC m=+25.643062110 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949449 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949467 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949491 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.949484423 +0000 UTC m=+25.643181253 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949524 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949536 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949558 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.949546264 +0000 UTC m=+25.643243094 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949572 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.949565085 +0000 UTC m=+25.643261915 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: I0320 15:25:23.949611 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949766 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:23 crc kubenswrapper[3552]: E0320 15:25:23.949792 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:25.949785591 +0000 UTC m=+25.643482421 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.052877 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.052955 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.052999 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053036 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053049 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053081 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053118 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053099438 +0000 UTC m=+25.746796268 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053169 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053212 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053242 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053259 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053275 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053312 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053335 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053310224 +0000 UTC m=+25.747007054 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053363 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053378 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053392 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053385516 +0000 UTC m=+25.747082346 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053460 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053468 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053484 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053478728 +0000 UTC m=+25.747175558 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053509 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053541 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053565 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053586 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053611 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053628 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053642 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053211 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053671 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053660393 +0000 UTC m=+25.747357413 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053697 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053685144 +0000 UTC m=+25.747382204 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053709 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053716 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053706154 +0000 UTC m=+25.747403204 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053733 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053725635 +0000 UTC m=+25.747422465 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053788 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053795 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053807 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053820 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053843 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053854 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053843998 +0000 UTC m=+25.747541038 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053878 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.053866498 +0000 UTC m=+25.747563548 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053882 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053921 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.05391238 +0000 UTC m=+25.747609430 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053926 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054147 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.054140676 +0000 UTC m=+25.747837506 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053613 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054163 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054172 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.054165946 +0000 UTC m=+25.747862776 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054197 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.054187157 +0000 UTC m=+25.747884217 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.053811 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054222 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054264 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.054254899 +0000 UTC m=+25.747951959 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.053588 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054318 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054349 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054384 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054428 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054457 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054484 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054510 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054536 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054560 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054591 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054621 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054647 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054680 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054704 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054763 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054794 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054817 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054844 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054866 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054891 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054917 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054948 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.054973 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 
crc kubenswrapper[3552]: I0320 15:25:24.055005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055030 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055059 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055086 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055111 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055135 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055158 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055182 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055225 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055249 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055273 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055300 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055325 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055353 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055376 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055411 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055466 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: 
\"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055498 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055534 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055556 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055580 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055602 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055631 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055655 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055679 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055706 3552 reconciler_common.go:231] "operationExecutor.MountVolume started 
for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055734 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055758 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055781 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055805 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055829 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055853 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055875 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055897 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod 
\"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055921 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055946 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055968 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.055991 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056012 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056039 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056067 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056111 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:24 crc 
kubenswrapper[3552]: I0320 15:25:24.056141 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056175 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056208 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056233 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056258 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056296 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056330 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056365 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056421 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056456 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056493 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056538 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056580 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056621 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056652 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056699 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056731 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod 
\"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056762 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056870 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056901 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056943 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056969 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.056995 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057018 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057040 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc 
kubenswrapper[3552]: I0320 15:25:24.057096 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057119 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057143 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057164 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057197 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057229 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057251 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057276 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057319 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057343 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057367 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057408 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.057482 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057557 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057584 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057576738 +0000 UTC m=+25.751273568 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057627 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057647 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.05764078 +0000 UTC m=+25.751337610 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057660 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057722 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057731 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057719302 +0000 UTC m=+25.751416342 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057737 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057784 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057771823 +0000 UTC m=+25.751468873 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.054207 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057826 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057816614 +0000 UTC m=+25.751513684 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057884 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057921 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057911837 +0000 UTC m=+25.751608887 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057962 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057955358 +0000 UTC m=+25.751652188 (durationBeforeRetry 2s). 
Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057968 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058007 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.057992789 +0000 UTC m=+25.751689839 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058070 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058086 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058115 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058106952 +0000 UTC m=+25.751804012 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058169 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058191 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058202 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058190744 +0000 UTC m=+25.751887804 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058223 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058214335 +0000 UTC m=+25.751911165 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058259 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058270 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058281 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058274737 +0000 UTC m=+25.751971567 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058289 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058304 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058322 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058336 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058326328 +0000 UTC m=+25.752023378 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058359 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058348299 +0000 UTC m=+25.752045369 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058368 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058391 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.05838503 +0000 UTC m=+25.752081860 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058426 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058465 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058452611 +0000 UTC m=+25.752149671 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058518 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058566 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058556134 +0000 UTC m=+25.752253204 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058574 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058606 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058599455 +0000 UTC m=+25.752296285 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058633 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058654 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058665 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058696 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058703 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058690348 +0000 UTC m=+25.752387388 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058667 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058723 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058716298 +0000 UTC m=+25.752413118 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058749 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058740539 +0000 UTC m=+25.752437609 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058759 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058806 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058798491 +0000 UTC m=+25.752495571 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058844 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058872 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058879 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058868932 +0000 UTC m=+25.752565982 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058884 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058904 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058916 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058943 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058932734 +0000 UTC m=+25.752629804 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058964 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058967 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058955795 +0000 UTC m=+25.752652865 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058998 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.058989476 +0000 UTC m=+25.752686536 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059022 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059055 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059070 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059079 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059057 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059048157 +0000 UTC m=+25.752745227 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059105 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059114 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059107129 +0000 UTC m=+25.752803959 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059139 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059130749 +0000 UTC m=+25.752827789 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059168 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059195 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059186541 +0000 UTC m=+25.752883371 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059193 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059229 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:26.059223392 +0000 UTC m=+25.752920222 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059249 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059277 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059286 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059276653 +0000 UTC m=+25.752973683 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059289 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059309 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059321 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059347 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059338015 +0000 UTC m=+25.753035085 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059369 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:26.059358956 +0000 UTC m=+25.753056026 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059380 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059389 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059435 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059410827 +0000 UTC m=+25.753107657 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059461 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059481 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059495 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059502 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059525 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.05951904 +0000 UTC m=+25.753215870 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059481 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059543 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059569 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059563731 +0000 UTC m=+25.753260561 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059607 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059629 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059623163 +0000 UTC m=+25.753319993 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059659 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059688 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059681564 +0000 UTC m=+25.753378394 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059697 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.057679 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059752 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059738456 +0000 UTC m=+25.753435436 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059771 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059796 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059777907 +0000 UTC m=+25.753474947 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059811 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059825 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059810958 +0000 UTC m=+25.753508018 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059840 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059850 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059838888 +0000 UTC m=+25.753535978 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059856 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059888 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059919 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.05991291 +0000 UTC m=+25.753609740 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059862 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059935 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059927691 +0000 UTC m=+25.753624521 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059944 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059974 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059977 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059984 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.059975042 +0000 UTC m=+25.753672092 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.058637 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060025 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059921 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060015 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060000963 +0000 UTC m=+25.753697793 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.059735 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060078 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060063924 +0000 UTC m=+25.753760994 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060091 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060099 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060088955 +0000 UTC m=+25.753786035 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060104 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060119 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060110146 +0000 UTC m=+25.753807366 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060132 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060074 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060144 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060132036 +0000 UTC m=+25.753829116 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060168 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060156337 +0000 UTC m=+25.753853427 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060188 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060237 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060249 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060261 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060238 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060287 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060187 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060177837 +0000 UTC m=+25.753874917 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060324 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060311411 +0000 UTC m=+25.754008501 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060343 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060333972 +0000 UTC m=+25.754031042 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060360 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060351512 +0000 UTC m=+25.754048572 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060377 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060367763 +0000 UTC m=+25.754064843 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060385 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060399 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060387273 +0000 UTC m=+25.754084353 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060406 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060461 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060468 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060457225 +0000 UTC m=+25.754154295 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060472 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060343 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060481 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060491 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060492 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060844 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060887 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060963 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060983 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060985 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061022 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061022 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060378 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc 
kubenswrapper[3552]: E0320 15:25:24.060101 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061072 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061081 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060268 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060503 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060300 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060504 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.060498056 +0000 UTC m=+25.754194886 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061124 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061132 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061154 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061120083 +0000 UTC m=+25.754816913 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061157 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061168 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061162464 +0000 UTC m=+25.754859294 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060546 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061183 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061174104 +0000 UTC m=+25.754870934 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061199 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061189345 +0000 UTC m=+25.754886175 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061211 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061204935 +0000 UTC m=+25.754901765 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061214 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061222 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061217735 +0000 UTC m=+25.754914565 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061154 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061237 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061242 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061227906 +0000 UTC m=+25.754924736 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061245 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061185 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061279 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061296 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061253 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061306 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061320 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061331 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061238 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061342 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061347 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 
20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061381 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061431 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061438 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060632 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061464 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061483 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061493 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060648 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060654 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060676 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060685 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060687 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060694 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061757 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 
crc kubenswrapper[3552]: E0320 15:25:24.061779 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061781 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060713 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060734 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060736 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060757 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060772 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060781 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061066 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061080 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060406 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062234 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060579 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062307 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062318 
3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061227 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062396 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062434 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.060617 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.061254 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.061248856 +0000 UTC m=+25.754945686 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062553 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062537151 +0000 UTC m=+25.756234001 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062584 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062569112 +0000 UTC m=+25.756266182 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062608 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062596192 +0000 UTC m=+25.756293272 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062628 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062617803 +0000 UTC m=+25.756314873 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062649 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062638533 +0000 UTC m=+25.756335613 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062707 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062659374 +0000 UTC m=+25.756356454 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062731 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062721326 +0000 UTC m=+25.756418406 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062753 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062740976 +0000 UTC m=+25.756438066 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062774 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062763297 +0000 UTC m=+25.756460377 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062796 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062785007 +0000 UTC m=+25.756482077 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062817 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:26.062805648 +0000 UTC m=+25.756502718 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062890 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.06287585 +0000 UTC m=+25.756572820 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062913 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062904731 +0000 UTC m=+25.756601801 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062933 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062923201 +0000 UTC m=+25.756620261 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062953 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062942312 +0000 UTC m=+25.756639402 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062974 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062962532 +0000 UTC m=+25.756659662 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.062994 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.062984053 +0000 UTC m=+25.756681143 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063049 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063003913 +0000 UTC m=+25.756700993 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063070 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063062645 +0000 UTC m=+25.756759725 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063085 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063077315 +0000 UTC m=+25.756774395 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063101 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063093226 +0000 UTC m=+25.756790306 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063118 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063109596 +0000 UTC m=+25.756806686 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063137 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063127036 +0000 UTC m=+25.756824106 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063157 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063146547 +0000 UTC m=+25.756843597 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063213 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063165267 +0000 UTC m=+25.756862348 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063237 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063228219 +0000 UTC m=+25.756925289 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063257 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.06324855 +0000 UTC m=+25.756945630 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063272 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:26.06326477 +0000 UTC m=+25.756961860 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063289 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063279391 +0000 UTC m=+25.756976451 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063307 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063298751 +0000 UTC m=+25.756995801 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063323 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063315402 +0000 UTC m=+25.757012462 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063374 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063332362 +0000 UTC m=+25.757029412 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063419 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063387913 +0000 UTC m=+25.757084983 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063447 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063437855 +0000 UTC m=+25.757134935 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063464 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063456135 +0000 UTC m=+25.757153215 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063481 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.063473356 +0000 UTC m=+25.757170416 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063582 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.063641 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.0636177 +0000 UTC m=+25.757314740 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.159203 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.159304 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.159353 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159389 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159452 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159468 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159521 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159549 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159564 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159565 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159584 3552 projected.go:294] Couldn't get configMap 
openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159593 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159528 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.159509988 +0000 UTC m=+25.853206828 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159796 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.159781255 +0000 UTC m=+25.853478095 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.159826 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.159816956 +0000 UTC m=+25.853513796 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.262923 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.262978 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263459 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263487 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263502 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263510 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263516 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263520 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263582 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.263562875 +0000 UTC m=+25.957259705 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.263629 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.263609646 +0000 UTC m=+25.957306476 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.365956 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.366047 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.366111 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366315 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366375 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366395 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366330 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366514 3552 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.366488322 +0000 UTC m=+26.060185322 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366541 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366559 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366319 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366644 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366659 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366619 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.366600345 +0000 UTC m=+26.060297175 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.366850 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.366825221 +0000 UTC m=+26.060522051 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429610 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429658 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429693 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429761 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429784 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429802 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429845 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429894 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429894 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429973 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.429976 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430052 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430108 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430076 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430135 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430082 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430176 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430185 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430203 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430256 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430290 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430309 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430319 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430333 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430371 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430372 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430443 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430506 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430613 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430626 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430694 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430733 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430809 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430897 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.430920 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.430972 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431063 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431112 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431157 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431245 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431368 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.431370 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.431510 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.431511 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431482 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431626 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431676 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431851 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431863 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431912 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.431965 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.432036 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.432098 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.432158 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.432213 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.432267 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.432315 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.470011 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.470087 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471063 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471095 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471111 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471173 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.471153465 +0000 UTC m=+26.164850295 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471243 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471263 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471273 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: E0320 15:25:24.471310 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:26.471299439 +0000 UTC m=+26.164996269 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.527695 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.534912 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:24 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:24 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:24 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.535062 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.602171 3552 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="b34d916602136d3fdd96329c77c1eef27be96db9c340631e85fd32b601756d16" exitCode=0 Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.602234 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"b34d916602136d3fdd96329c77c1eef27be96db9c340631e85fd32b601756d16"} Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.607501 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.607536 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.607551 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.607562 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.607572 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} Mar 20 15:25:24 crc kubenswrapper[3552]: I0320 15:25:24.607581 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429719 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429983 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430048 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430099 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429795 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429885 3552 util.go:30] "No sandbox for pod can be found. 
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429719 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429983 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430048 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430099 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429795 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.429885 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.430538 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430656 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430611 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430698 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.430855 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.430976 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.430860 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.431079 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.431182 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.431189 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.431375 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.431507 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.431601 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.431733 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.431804 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.431850 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.431741 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.432198 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.432269 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.432624 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.432647 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
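"No sandbox for pod can be found. Need to start a new one" is the kubelet deciding, per pod, that a pod sandbox must be (re)created before any containers can run; with the CNI config still missing, the sandbox creation that follows is exactly what keeps failing. A toy sketch of the decision rule, assuming a trivial status model (the real logic lives in the kubelet's computePodActions; this is only an illustration):

    package main

    import "fmt"

    type sandboxStatus struct {
        exists bool
        ready  bool
    }

    // needsNewSandbox mirrors the basic kubelet rule: create a sandbox when
    // none exists, or when the existing one is no longer ready (for example,
    // its network namespace was torn down).
    func needsNewSandbox(s sandboxStatus) bool {
        return !s.exists || !s.ready
    }

    func main() {
        if needsNewSandbox(sandboxStatus{}) {
            fmt.Println("No sandbox for pod can be found. Need to start a new one")
        }
    }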
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.432939 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.433150 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.433351 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.433489 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.433727 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.433844 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:25 crc kubenswrapper[3552]: E0320 15:25:25.433963 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.530456 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:25 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:25 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:25 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.530534 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:25 crc kubenswrapper[3552]: I0320 15:25:25.614879 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"2bd404a872316ec541bbd52a1d2e005641c19974057a1e02f0a7ee5fc5962bf8"} Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.021863 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.022197 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.022308 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.022280029 +0000 UTC m=+29.715976879 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.023979 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.024088 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.024175 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.024223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.024295 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.024330 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024474 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024515 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.024504899 +0000 UTC m=+29.718201739 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024566 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024595 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.024587071 +0000 UTC m=+29.718283921 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024637 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024666 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.024657793 +0000 UTC m=+29.718354633 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024705 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024731 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.024722885 +0000 UTC m=+29.718419725 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024805 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024835 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. 
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024876 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.024903 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.024894489 +0000 UTC m=+29.718591329 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125734 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125792 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125828 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125862 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125898 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125928 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125962 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.125997 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126031 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126068 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126100 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126133 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126168 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126201 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126236 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126286 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126319 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126367 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126448 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126482 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126531 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126562 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126592 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126621 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126652 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126739 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126773 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126805 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126895 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126930 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.126964 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127011 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127043 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127079 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127111 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127142 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127185 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127219 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127260 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127304 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127340 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127372 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127423 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127467 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127508 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127540 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127574 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127618 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127652 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127685 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127721 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127754 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127787 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127823 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127855 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127886 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127921 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127951 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.127982 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128019 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128052 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128087 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128121 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128152 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128188 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128257 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128290 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128320 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128352 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128382 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128462 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128493 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128546 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128577 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128607 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128639 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128672 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128704 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128741 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128775 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128806 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128837 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128868 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128902 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128934 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128964 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.128999 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for
volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129031 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129060 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129091 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129126 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129187 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129218 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129251 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: 
\"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129285 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129319 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129352 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129433 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129464 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129496 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129529 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129566 
3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129626 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129656 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129683 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129714 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129744 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129777 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129809 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" 
(UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129842 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129874 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129904 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129937 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.129976 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.130007 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130175 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130195 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130250 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.130231611 +0000 UTC m=+29.823928451 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130321 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130352 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.130342594 +0000 UTC m=+29.824039434 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130404 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130464 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.130455397 +0000 UTC m=+29.824152237 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130525 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130552 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.130545129 +0000 UTC m=+29.824241969 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130615 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130630 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130641 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130673 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.130662652 +0000 UTC m=+29.824359492 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130725 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130781 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.130771305 +0000 UTC m=+29.824468145 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130823 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130850 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.130843167 +0000 UTC m=+29.824540007 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130903 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130917 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130926 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.130954 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.1309457 +0000 UTC m=+29.824642540 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131008 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131022 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131031 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131059 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131050923 +0000 UTC m=+29.824747763 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131103 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131130 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131122295 +0000 UTC m=+29.824819135 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131171 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131197 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131189216 +0000 UTC m=+29.824886066 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131251 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131266 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131276 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131306 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.131297229 +0000 UTC m=+29.824994079 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131362 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131376 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131385 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131435 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131425753 +0000 UTC m=+29.825122593 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131492 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131505 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131514 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131542 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131534516 +0000 UTC m=+29.825231366 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131589 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131616 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131607188 +0000 UTC m=+29.825304028 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131668 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131683 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131693 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131721 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13171329 +0000 UTC m=+29.825410130 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131767 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131793 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.131785982 +0000 UTC m=+29.825482832 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131832 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131859 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131851124 +0000 UTC m=+29.825547964 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131930 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131959 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.131950267 +0000 UTC m=+29.825647107 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.131998 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132024 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132015869 +0000 UTC m=+29.825712709 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132063 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132091 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13208416 +0000 UTC m=+29.825781010 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132130 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132155 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132147792 +0000 UTC m=+29.825844632 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132203 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132229 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132222074 +0000 UTC m=+29.825918914 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132263 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132287 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132279606 +0000 UTC m=+29.825976456 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132324 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132349 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132342057 +0000 UTC m=+29.826038897 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132394 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132441 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13243363 +0000 UTC m=+29.826130470 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132487 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132514 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.132506572 +0000 UTC m=+29.826203412 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132558 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132585 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132577564 +0000 UTC m=+29.826274414 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132629 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132655 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132646905 +0000 UTC m=+29.826343745 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132701 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132727 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132719247 +0000 UTC m=+29.826416097 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132778 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132791 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132801 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132826 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13281844 +0000 UTC m=+29.826515280 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132879 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132892 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132901 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132931 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132920233 +0000 UTC m=+29.826617083 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132973 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.132999 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.132991245 +0000 UTC m=+29.826688085 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133040 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133066 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133058586 +0000 UTC m=+29.826755426 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133106 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133135 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133127148 +0000 UTC m=+29.826823998 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133186 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133199 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133209 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133235 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133226461 +0000 UTC m=+29.826923301 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133284 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133310 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133302273 +0000 UTC m=+29.826999123 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133350 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133379 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133370285 +0000 UTC m=+29.827067125 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133442 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133472 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133463837 +0000 UTC m=+29.827160687 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133515 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133539 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133531929 +0000 UTC m=+29.827228769 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133591 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133603 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133614 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133641 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133632652 +0000 UTC m=+29.827329492 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133698 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133712 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133721 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133748 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133740475 +0000 UTC m=+29.827437325 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133799 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133824 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133816497 +0000 UTC m=+29.827513337 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133876 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133888 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133897 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133923 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133915919 +0000 UTC m=+29.827612759 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133965 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.133989 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.133982331 +0000 UTC m=+29.827679181 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134026 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134053 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.134045783 +0000 UTC m=+29.827742623 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134103 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134117 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134143 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.134135275 +0000 UTC m=+29.827832125 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134195 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134208 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134217 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134243 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.134234388 +0000 UTC m=+29.827931238 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134283 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134306 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13429864 +0000 UTC m=+29.827995480 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134724 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134758 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.134748812 +0000 UTC m=+29.828445662 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134821 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134847 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.134839884 +0000 UTC m=+29.828536724 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134888 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134914 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.134906356 +0000 UTC m=+29.828603196 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134962 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134976 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.134986 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135011 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135003349 +0000 UTC m=+29.828700189 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135051 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135078 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13506996 +0000 UTC m=+29.828766810 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135122 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135149 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135141632 +0000 UTC m=+29.828838472 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135185 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135213 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135205434 +0000 UTC m=+29.828902274 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135249 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135277 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135267796 +0000 UTC m=+29.828964636 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135319 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135347 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135338778 +0000 UTC m=+29.829035618 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135389 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135436 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13542904 +0000 UTC m=+29.829125880 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135492 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135505 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135516 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135543 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135535533 +0000 UTC m=+29.829232383 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135590 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135617 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135608385 +0000 UTC m=+29.829305235 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135672 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135686 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135695 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135722 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135713988 +0000 UTC m=+29.829410828 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135766 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135791 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.135783439 +0000 UTC m=+29.829480279 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135829 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135853 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135845791 +0000 UTC m=+29.829542631 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135890 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135914 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135906813 +0000 UTC m=+29.829603653 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135950 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.135974 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.135966974 +0000 UTC m=+29.829663814 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136015 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136040 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136032496 +0000 UTC m=+29.829729336 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136082 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136112 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136105028 +0000 UTC m=+29.829801868 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136163 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136175 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136199 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.13619192 +0000 UTC m=+29.829888770 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136243 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136269 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136261582 +0000 UTC m=+29.829958422 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136319 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136335 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136346 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136373 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136364345 +0000 UTC m=+29.830061185 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136434 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136460 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136451927 +0000 UTC m=+29.830148767 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136500 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136525 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136518239 +0000 UTC m=+29.830215089 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136560 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136584 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136575901 +0000 UTC m=+29.830272741 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136625 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136652 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136644122 +0000 UTC m=+29.830340972 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136698 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136724 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136716104 +0000 UTC m=+29.830412944 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136763 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136787 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136780196 +0000 UTC m=+29.830477036 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136823 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136851 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.136840248 +0000 UTC m=+29.830537088 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136889 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136914 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.136905479 +0000 UTC m=+29.830602319 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136968 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136981 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.136991 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137017 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137010162 +0000 UTC m=+29.830707012 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137055 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137079 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137072034 +0000 UTC m=+29.830768874 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137128 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137140 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137149 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137178 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137169127 +0000 UTC m=+29.830865967 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137226 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137251 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137243489 +0000 UTC m=+29.830940329 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137301 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137310 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137336 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137327401 +0000 UTC m=+29.831024251 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137384 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137403 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137439 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137468 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137459324 +0000 UTC m=+29.831156174 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137507 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137530 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137523396 +0000 UTC m=+29.831220246 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137570 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137597 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137589928 +0000 UTC m=+29.831286768 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137649 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137662 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137675 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137708 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137698191 +0000 UTC m=+29.831395041 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137746 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137772 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137764583 +0000 UTC m=+29.831461433 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137823 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137837 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137845 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137871 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137863665 +0000 UTC m=+29.831560505 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137917 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137943 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.137935417 +0000 UTC m=+29.831632257 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.137985 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138012 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138004129 +0000 UTC m=+29.831700979 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138052 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138077 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138068671 +0000 UTC m=+29.831765511 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138127 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138140 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138149 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138180 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138172273 +0000 UTC m=+29.831869113 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138217 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138242 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138235095 +0000 UTC m=+29.831931935 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138277 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138301 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138294057 +0000 UTC m=+29.831990907 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138342 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138367 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138360218 +0000 UTC m=+29.832057069 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138444 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138474 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138466191 +0000 UTC m=+29.832163031 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138511 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138539 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138531803 +0000 UTC m=+29.832228643 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138582 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138606 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138598735 +0000 UTC m=+29.832295575 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138658 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138674 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138683 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138713 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138704418 +0000 UTC m=+29.832401258 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138753 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138780 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138771529 +0000 UTC m=+29.832468379 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138844 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138871 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:30.138863142 +0000 UTC m=+29.832559992 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138910 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138935 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138928314 +0000 UTC m=+29.832625164 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.138975 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139001 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.138992275 +0000 UTC m=+29.832689125 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139036 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139060 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139053327 +0000 UTC m=+29.832750167 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139100 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139126 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139118279 +0000 UTC m=+29.832815119 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139167 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139193 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139185321 +0000 UTC m=+29.832882161 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139246 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139262 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139272 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139300 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139291993 +0000 UTC m=+29.832988843 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139336 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139360 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139352915 +0000 UTC m=+29.833049765 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139393 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139442 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139434237 +0000 UTC m=+29.833131087 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139480 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139508 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139499529 +0000 UTC m=+29.833196379 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139548 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139572 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139564801 +0000 UTC m=+29.833261651 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139610 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139637 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139629412 +0000 UTC m=+29.833326262 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139672 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139696 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139689424 +0000 UTC m=+29.833386264 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139736 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139762 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139754676 +0000 UTC m=+29.833451516 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139798 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139821 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139814817 +0000 UTC m=+29.833511667 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139869 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139894 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139886889 +0000 UTC m=+29.833583729 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139932 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.139958 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.139950661 +0000 UTC m=+29.833647501 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.140248 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.140239989 +0000 UTC m=+29.833936829 (durationBeforeRetry 4s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.232968 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.233025 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.233065 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234011 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered 
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234035 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234046 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234091 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.234077902 +0000 UTC m=+29.927774732 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234144 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234156 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234163 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234184 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.234178135 +0000 UTC m=+29.927874955 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234225 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234236 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234243 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.234265 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.234259177 +0000 UTC m=+29.927955997 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.337288 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.337369 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337739 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337793 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337810 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337814 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337828 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337839 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337878 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.337865962 +0000 UTC m=+30.031562792 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.337916 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.337888442 +0000 UTC m=+30.031585312 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.429597 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.429828 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.429880 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.429994 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430057 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.430150 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430187 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.430270 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430325 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.430431 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430468 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.430551 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430585 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.430640 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430678 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.430776 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430813 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.430973 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431046 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431092 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431131 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431209 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431240 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431265 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431317 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431345 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431364 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431008 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431532 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431585 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431545 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431662 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431685 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431720 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431750 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.431818 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.431847 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.432064 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.432112 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.432151 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.432182 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.432230 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.432449 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.432625 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.432684 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.432775 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.432884 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.432931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.433018 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.433107 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.433237 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.433294 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.433508 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.433841 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.433994 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.434147 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.434242 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.434340 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.442566 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442734 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.442777 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442857 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442873 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442886 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442783 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.442939 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442956 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.442940536 +0000 UTC m=+30.136637366 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.442959 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.443058 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.443029379 +0000 UTC m=+30.136726219 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.443483 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.443546 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.443578 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.443676 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.443642785 +0000 UTC m=+30.137339655 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.529980 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:26 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:26 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:26 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.530100 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.546010 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.546085 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.546938 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.546966 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.546980 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.547034 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.547015614 +0000 UTC m=+30.240712464 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.547098 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.547113 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.547123 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: E0320 15:25:26.547154 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:30.547143017 +0000 UTC m=+30.240839857 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.627203 3552 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="2bd404a872316ec541bbd52a1d2e005641c19974057a1e02f0a7ee5fc5962bf8" exitCode=0 Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.627235 3552 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="fecb5c27ea0eed87e48075a9586fc4846b2a5f047d53492a2e0ea5d6e14f7fe2" exitCode=0 Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.627279 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"2bd404a872316ec541bbd52a1d2e005641c19974057a1e02f0a7ee5fc5962bf8"} Mar 20 15:25:26 crc kubenswrapper[3552]: I0320 15:25:26.627321 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"fecb5c27ea0eed87e48075a9586fc4846b2a5f047d53492a2e0ea5d6e14f7fe2"} Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.430446 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.430531 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.430725 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.430738 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.430803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.430859 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.430810 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.431263 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431528 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431856 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.431865 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431557 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431965 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.432014 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.431655 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431712 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.432087 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.431801 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431816 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.432127 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.431635 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.432068 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.432545 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.432907 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433073 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433313 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433541 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433602 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433671 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433729 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.433939 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.434128 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.434355 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:27 crc kubenswrapper[3552]: E0320 15:25:27.434367 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.531047 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:27 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:27 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:27 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.531203 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.639247 3552 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="ed01fbd35403f31615ea8fb1401f7d7a0218e6855875894a01da8e71d7416ee1" exitCode=0 Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.639344 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"ed01fbd35403f31615ea8fb1401f7d7a0218e6855875894a01da8e71d7416ee1"} Mar 20 15:25:27 crc kubenswrapper[3552]: I0320 15:25:27.646512 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.214489 3552 kubelet_node_status.go:402] "Setting node annotation to enable volume controller attach/detach" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.216545 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.216574 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.216586 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.216826 3552 kubelet_node_status.go:77] "Attempting 
to register node" node="crc" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.227022 3552 kubelet_node_status.go:116] "Node was previously registered" node="crc" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.227365 3552 kubelet_node_status.go:80] "Successfully registered node" node="crc" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.230865 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeNotReady" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.230959 3552 setters.go:574] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-03-20T15:25:28Z","lastTransitionTime":"2026-03-20T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.429919 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.430598 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.430731 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.430928 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.431105 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.431311 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.431460 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.431639 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.431761 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.431962 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.432072 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.432250 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.432368 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.432607 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.432717 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.432887 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.432982 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.433143 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.433243 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.433451 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.433549 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.433742 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.433890 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434086 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.434176 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434195 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.434437 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434472 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434528 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.434676 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434710 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434802 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.434891 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434904 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.434997 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.435093 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.435175 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.435189 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.435272 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.435382 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.435382 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.435602 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.435729 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.435889 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.435966 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.436076 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.436205 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.436375 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.436535 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.436678 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.436783 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.436957 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.437423 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.437685 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.438033 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.438087 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.438237 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:28 crc kubenswrapper[3552]: E0320 15:25:28.438482 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.529189 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:28 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:28 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:28 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.529306 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.572516 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.599650 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.658135 3552 generic.go:334] "Generic (PLEG): container finished" podID="7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8" containerID="e1f34900fbeeb4a98838edab7330f10be436ec082b185053a8b4a4b6a6bb2196" exitCode=0 Mar 20 15:25:28 crc kubenswrapper[3552]: I0320 15:25:28.659980 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerDied","Data":"e1f34900fbeeb4a98838edab7330f10be436ec082b185053a8b4a4b6a6bb2196"} Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.429916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430435 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430470 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430565 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430445 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.429980 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430022 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430132 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430197 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430255 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430260 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430318 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430323 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430327 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430343 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430349 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.430055 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.430891 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.430990 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.431268 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.431546 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.431877 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.431736 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432011 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432119 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432255 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432435 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432566 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432702 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432787 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.432948 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.433110 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.433243 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:29 crc kubenswrapper[3552]: E0320 15:25:29.433332 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.529548 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:29 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:29 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:29 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.529642 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.675874 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bzj2p" event={"ID":"7dbadf0a-ba02-47d6-96a9-0995c1e8e4a8","Type":"ContainerStarted","Data":"ecd940af17798045d85440aa7707e38253a2fc30251ffe456871b0a112a71c73"} Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.685341 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerStarted","Data":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.685646 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:29 crc kubenswrapper[3552]: I0320 15:25:29.774695 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.075426 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.075823 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.075966 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.075922559 +0000 UTC m=+37.769619419 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.077453 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.077615 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.077677 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.077744 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.077784 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.077760948 +0000 UTC m=+37.771457818 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.077885 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.077936 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.077905452 +0000 UTC m=+37.771602312 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.077995 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078046 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.078031656 +0000 UTC m=+37.771728516 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.078091 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.078148 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.078235 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078303 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078319 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078565 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.07854914 +0000 UTC m=+37.772246010 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078573 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078635 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.078620491 +0000 UTC m=+37.772317361 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.078680 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.078661833 +0000 UTC m=+37.772358813 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180504 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180551 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:30 crc 
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180643 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180687 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180731 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180775 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180820 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180863 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180908 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.180954 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181051 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181097 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181139 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181185 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181229 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181273 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181317 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181367 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181454 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181501 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181549 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181593 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181635 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181687 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181729 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181774 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181853 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181916 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.181958 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182001 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182046 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182112 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182155 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182197 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182239 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182338 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182392 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182465 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182622 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182668 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182727 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182774 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182820 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182863 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182904 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.182962 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183051 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183113 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183160 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183222 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183267 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183308 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183387 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183471 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183520 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183565 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183628 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183689 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183733 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183776 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183826 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183869 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183917 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.183968 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184010 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184058 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184100 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184145 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184190 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184238 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184283 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184332 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184375 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184448 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184494 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184539 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184586 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184659 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184725 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184784 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184834 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184877 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184921 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.184963 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185007 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185049 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185096 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185153 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185196 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185242 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185293 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185340 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185513 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185561 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185603 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185651 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185696 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185780 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20
15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185843 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185887 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185936 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.185983 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186046 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186106 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186150 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186197 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186244 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186291 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186340 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186384 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186476 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186523 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186566 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.186613 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187167 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 
15:25:30.187249 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.18722775 +0000 UTC m=+37.880924610 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187352 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187384 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187401 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187501 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.187483737 +0000 UTC m=+37.881180607 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187575 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187624 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.187612371 +0000 UTC m=+37.881309231 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187678 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187715 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.187704383 +0000 UTC m=+37.881401253 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187769 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187817 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.187805046 +0000 UTC m=+37.881501916 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187888 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187937 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.187925619 +0000 UTC m=+37.881622479 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.187995 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188037 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:38.188024482 +0000 UTC m=+37.881721342 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188094 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188136 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.188124544 +0000 UTC m=+37.881821404 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188203 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188252 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.188240367 +0000 UTC m=+37.881937237 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188305 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188342 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.18833107 +0000 UTC m=+37.882027930 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188521 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188580 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.188564256 +0000 UTC m=+37.882261126 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188663 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188688 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188729 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.18871711 +0000 UTC m=+37.882413980 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188741 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.188869 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.188838993 +0000 UTC m=+37.882535853 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189012 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189114 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189214 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189301 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189318 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189392 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189456 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189466 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189477 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189548 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189594 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189646 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189670 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189678 3552 secret.go:194] Couldn't get secret 
openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189692 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189711 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189738 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189754 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189776 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189794 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189846 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189857 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189895 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189915 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189932 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189954 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189968 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 
15:25:30.189985 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190024 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190073 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190128 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190149 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190173 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190194 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190202 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190213 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190223 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190238 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190327 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190441 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190463 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: 
E0320 15:25:30.190479 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190496 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190522 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190594 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190648 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190670 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190681 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190689 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190699 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190650 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190803 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190820 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190855 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190880 3552 secret.go:194] Couldn't get secret 
openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190944 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190883 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190977 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191003 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191006 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191020 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191083 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189462 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191133 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191154 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191191 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191207 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191229 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191242 3552 
projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191286 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191332 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191361 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191424 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191448 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191503 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190532 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191531 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191542 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190237 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191565 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190328 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191581 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not 
registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191600 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191614 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190398 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190484 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191661 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191675 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189796 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191697 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191706 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191137 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191761 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191768 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191834 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object 
"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191844 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191906 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191920 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191957 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191996 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192036 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192070 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192088 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190132 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192119 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191846 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191710 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192171 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190980 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc 
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192204 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190597 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192251 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190078 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192282 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192291 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192300 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192315 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189933 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192338 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191545 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191910 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192378 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.190332 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192454 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192468 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191849 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192537 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192332 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192623 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192627 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192649 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192536 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192626 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192689 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192716 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192733 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189557 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192000 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192774 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192793 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192738 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192842 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192880 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192902 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192906 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192991 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193007 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193021 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193025 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193055 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.192396 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193086 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193115 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.191711 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193127 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193183 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193201 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.189041 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.189027668 +0000 UTC m=+37.882724538 (durationBeforeRetry 8s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193106 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193262 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193244611 +0000 UTC m=+37.886941481 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193267 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193287 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193275362 +0000 UTC m=+37.886972222 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193309 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193298783 +0000 UTC m=+37.886995653 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193331 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193320383 +0000 UTC m=+37.887017253 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193359 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193346644 +0000 UTC m=+37.887043504 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193386 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193373085 +0000 UTC m=+37.887069955 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193434 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193397616 +0000 UTC m=+37.887094476 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193460 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193448377 +0000 UTC m=+37.887145237 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193481 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193469417 +0000 UTC m=+37.887166287 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193502 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193490658 +0000 UTC m=+37.887187528 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193524 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193512149 +0000 UTC m=+37.887209019 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193548 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193536089 +0000 UTC m=+37.887232949 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193572 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19356045 +0000 UTC m=+37.887257320 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193595 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19358137 +0000 UTC m=+37.887278230 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193617 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193606181 +0000 UTC m=+37.887303041 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193641 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193629432 +0000 UTC m=+37.887326292 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193664 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193651972 +0000 UTC m=+37.887348842 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193685 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193674623 +0000 UTC m=+37.887371483 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193709 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193697624 +0000 UTC m=+37.887394484 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193737 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193721994 +0000 UTC m=+37.887419004 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193766 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193750305 +0000 UTC m=+37.887447295 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193799 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193782196 +0000 UTC m=+37.887479136 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193824 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193812397 +0000 UTC m=+37.887509267 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193846 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193835217 +0000 UTC m=+37.887532077 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193868 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193857198 +0000 UTC m=+37.887554058 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193893 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193880358 +0000 UTC m=+37.887577228 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193915 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193904289 +0000 UTC m=+37.887601159 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193937 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19392609 +0000 UTC m=+37.887622960 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193960 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19394842 +0000 UTC m=+37.887645290 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.193983 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193970521 +0000 UTC m=+37.887667381 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194005 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.193994232 +0000 UTC m=+37.887691092 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194028 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194015192 +0000 UTC m=+37.887712062 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194050 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194038393 +0000 UTC m=+37.887735263 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194075 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194061313 +0000 UTC m=+37.887758173 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194103 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194088264 +0000 UTC m=+37.887785134 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194129 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194117095 +0000 UTC m=+37.887813955 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194149 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194138305 +0000 UTC m=+37.887835165 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194170 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194159046 +0000 UTC m=+37.887855916 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194191 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194180417 +0000 UTC m=+37.887877287 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194213 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194201047 +0000 UTC m=+37.887897917 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194232 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194222268 +0000 UTC m=+37.887919128 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194255 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194242958 +0000 UTC m=+37.887939828 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194276 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194264819 +0000 UTC m=+37.887961689 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194299 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194287639 +0000 UTC m=+37.887984509 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194321 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19430995 +0000 UTC m=+37.888006810 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194350 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194333261 +0000 UTC m=+37.888030131 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194385 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194369952 +0000 UTC m=+37.888066962 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194453 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194398822 +0000 UTC m=+37.888095692 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194484 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194471554 +0000 UTC m=+37.888168414 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194508 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194496435 +0000 UTC m=+37.888193295 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194533 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194519056 +0000 UTC m=+37.888215926 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194563 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194547096 +0000 UTC m=+37.888244086 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194595 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194579367 +0000 UTC m=+37.888276227 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194623 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194611048 +0000 UTC m=+37.888307908 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194647 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194634929 +0000 UTC m=+37.888331789 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194670 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194658629 +0000 UTC m=+37.888355499 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194695 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19468322 +0000 UTC m=+37.888380090 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194716 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194705181 +0000 UTC m=+37.888402041 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194738 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194725621 +0000 UTC m=+37.888422491 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194758 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194747972 +0000 UTC m=+37.888444842 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194780 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194768712 +0000 UTC m=+37.888465572 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194805 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194791613 +0000 UTC m=+37.888488483 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194830 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194815844 +0000 UTC m=+37.888512704 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194852 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194840744 +0000 UTC m=+37.888537614 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194874 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194862515 +0000 UTC m=+37.888559375 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194898 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194886365 +0000 UTC m=+37.888583225 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194927 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194910376 +0000 UTC m=+37.888607236 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194956 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194940077 +0000 UTC m=+37.888637097 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.194987 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194971108 +0000 UTC m=+37.888668098 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195011 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.194999678 +0000 UTC m=+37.888696538 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195033 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195021919 +0000 UTC m=+37.888718789 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195087 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19504562 +0000 UTC m=+37.888742480 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195113 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195100741 +0000 UTC m=+37.888797601 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195136 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195124562 +0000 UTC m=+37.888821432 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195160 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195146762 +0000 UTC m=+37.888843622 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195184 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195173453 +0000 UTC m=+37.888870323 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195206 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195194244 +0000 UTC m=+37.888891114 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195229 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195216604 +0000 UTC m=+37.888913464 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195253 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195241665 +0000 UTC m=+37.888938535 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195275 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195263206 +0000 UTC m=+37.888960066 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195297 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195287166 +0000 UTC m=+37.888984036 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195322 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195309007 +0000 UTC m=+37.889005877 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195342 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195331607 +0000 UTC m=+37.889028477 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195363 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195352488 +0000 UTC m=+37.889049348 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195385 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195373709 +0000 UTC m=+37.889070569 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195441 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195395439 +0000 UTC m=+37.889092309 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195475 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195457751 +0000 UTC m=+37.889154701 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195504 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195492862 +0000 UTC m=+37.889189722 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195528 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:38.195516792 +0000 UTC m=+37.889213652 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195550 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195539273 +0000 UTC m=+37.889236143 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195571 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195559853 +0000 UTC m=+37.889256713 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195592 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195582124 +0000 UTC m=+37.889278994 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195614 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195603145 +0000 UTC m=+37.889300005 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195637 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:38.195625305 +0000 UTC m=+37.889322175 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195659 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195647476 +0000 UTC m=+37.889344346 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195680 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195669446 +0000 UTC m=+37.889366306 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195703 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195691617 +0000 UTC m=+37.889388487 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195728 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195715608 +0000 UTC m=+37.889412468 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195750 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195739288 +0000 UTC m=+37.889436158 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195773 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195761069 +0000 UTC m=+37.889457939 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195796 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195784389 +0000 UTC m=+37.889481259 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195821 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.19580999 +0000 UTC m=+37.889506860 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195842 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195831331 +0000 UTC m=+37.889528201 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195863 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195852001 +0000 UTC m=+37.889548871 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195887 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195874722 +0000 UTC m=+37.889571712 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.195912 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.195899453 +0000 UTC m=+37.889596313 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.290807 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.290891 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.290955 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291202 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291470 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291497 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291516 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291589 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.291567485 +0000 UTC m=+37.985264345 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291669 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291689 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291750 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291821 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291848 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291770 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.2917556 +0000 UTC m=+37.985452470 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.291995 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.291958825 +0000 UTC m=+37.985655685 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.393191 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.393345 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.393517 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.393575 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.393586 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.393599 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.393621 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.393636 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.394027 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.393990138 +0000 UTC m=+38.087687058 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.394094 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.3940652 +0000 UTC m=+38.087762180 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.430192 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.431289 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.431381 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.431507 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.431621 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.431689 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.431775 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.431924 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.431981 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432063 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.432168 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432223 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432302 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.432393 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432520 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432611 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.432733 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432789 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.432866 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.432968 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433021 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433107 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.433208 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433264 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433350 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.433479 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433534 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433616 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.433714 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433769 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433841 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.433936 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.433991 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434070 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.434205 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434274 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434359 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.434490 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434550 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434629 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.434741 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434794 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.434867 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.434959 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435069 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435208 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435374 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435556 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435687 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435806 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.435927 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436021 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436136 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436242 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436344 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436489 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436633 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.436770 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.498350 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.498496 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.498630 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500536 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500580 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500599 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500671 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.500649595 +0000 UTC m=+38.194346455 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500757 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500778 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500792 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500834 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.500821229 +0000 UTC m=+38.194518089 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500860 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500879 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.500892 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.501533 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.501511188 +0000 UTC m=+38.195208048 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.528772 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:30 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:30 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:30 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.528850 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.602866 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.602982 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603092 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603148 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603173 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603274 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.603246392 +0000 UTC m=+38.296943252 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603460 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603521 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603542 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: E0320 15:25:30.603650 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:38.603620942 +0000 UTC m=+38.297317812 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.690107 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.690305 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:30 crc kubenswrapper[3552]: I0320 15:25:30.781080 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.431910 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432421 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.431961 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432015 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432052 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432102 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432138 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432169 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432203 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432210 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432238 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432274 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.432279 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.433582 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.433651 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.433702 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.433810 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.433870 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.433891 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.433985 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.434363 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.434567 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.434739 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.434901 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.435033 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.435194 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.435349 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.435504 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.435592 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.435793 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.436005 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.436173 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.436295 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:31 crc kubenswrapper[3552]: E0320 15:25:31.436461 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.530959 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:31 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:31 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:31 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:31 crc kubenswrapper[3552]: I0320 15:25:31.531068 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430295 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430961 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430342 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430342 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430365 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430368 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430384 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430396 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430432 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430454 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430470 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430481 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431500 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430501 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431561 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430511 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431635 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430533 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430536 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430555 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430576 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430592 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430639 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430653 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431769 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430757 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430756 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430790 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430823 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431819 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430860 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430852 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430889 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431868 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.430923 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431722 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.431947 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.432308 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.432364 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.432698 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.432745 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.432838 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.432952 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433099 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433216 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433256 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433489 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433632 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433740 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433816 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433872 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433927 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.433990 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.434095 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.434242 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.434314 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.434414 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:32 crc kubenswrapper[3552]: E0320 15:25:32.434525 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.528019 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.530996 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:32 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:32 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:32 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:32 crc kubenswrapper[3552]: I0320 15:25:32.531110 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430172 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430210 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430376 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430278 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430252 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430319 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430336 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430356 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430300 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430725 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.430964 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.430989 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.431129 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.431189 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.431600 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.431739 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.431843 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.431990 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.432079 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.432300 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.432526 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.432658 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.432868 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433022 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433223 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433389 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433510 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433594 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433688 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433851 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433910 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:33 crc kubenswrapper[3552]: E0320 15:25:33.433978 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.530086 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:33 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:33 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:33 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:33 crc kubenswrapper[3552]: I0320 15:25:33.530211 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.047010 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430115 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430153 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430188 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430333 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430454 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430462 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430333 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430141 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.430677 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430924 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.430980 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.430982 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431042 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431045 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431100 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431167 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431216 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431280 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431296 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431309 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431101 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431219 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431167 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431103 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.431544 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431573 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431284 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.431739 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431767 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.431980 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.432094 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.432203 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.432242 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.432450 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
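Note on the entries above and below: the kubelet keeps the node's runtime network NotReady until its network plugin reports a CNI configuration, so every pod sync is skipped with NetworkPluginNotReady until a config file lands in /etc/kubernetes/cni/net.d/. A minimal Go sketch of that readiness test, assuming only what the log shows: the directory path is taken from the error text, while the accepted file extensions are assumptions based on common CNI conventions, not kubelet's exact logic.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Approximation of the readiness check implied by the log: the network
// plugin counts as ready once at least one CNI config file exists in
// the conf dir. Accepted extensions follow common CNI conventions
// (assumption, not kubelet's actual matching rules).
func main() {
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", e.Name())
			return
		}
	}
	fmt.Println("NetworkPluginNotReady: no CNI configuration file in", dir)
}

Once the network operator writes its config into that directory, the NetworkReady condition flips and the skipped pods are retried on the next sync pass.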
pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.432578 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.432751 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.432888 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.433040 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.433161 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.433240 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.433335 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.433522 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.433603 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.433705 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.433819 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434085 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434101 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434226 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434458 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434563 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434679 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434767 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.434902 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.435044 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.435146 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.435255 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.435358 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:34 crc kubenswrapper[3552]: E0320 15:25:34.435528 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.530597 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:34 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:34 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:34 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:34 crc kubenswrapper[3552]: I0320 15:25:34.530753 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429533 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429582 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429631 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429638 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429667 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429664 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429720 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429595 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429745 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429684 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429547 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429859 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429890 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.429905 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430034 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430146 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430215 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
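The router-default startup probe block that repeats each second (next at 15:25:35.529 below) is a kubelet HTTP probe: a response code of 400 or above counts as failure, and the quoted start-of-body lines are the router's aggregated health checks, with [-] marking failing sub-checks (backend-http, has-synced) and [+] passing ones (process-running). A rough reproduction of such a probe in Go; the URL and port are illustrative assumptions, not values taken from this cluster.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// Hypothetical check mirroring a kubelet HTTP startup probe: any
// status >= 400 is a failure, and a prefix of the body is kept for the
// log line. The health endpoint address is an assumption.
func main() {
	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get("http://127.0.0.1:1936/healthz")
	if err != nil {
		fmt.Println("probe error:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096))
	if resp.StatusCode >= 400 {
		fmt.Printf("Probe failed: statuscode %d\nstart-of-body=%s\n", resp.StatusCode, body)
		return
	}
	fmt.Println("probe ok:", resp.StatusCode)
}

Here the router keeps answering 500 until its backends sync, which is why the probe fails on every pass while the pod network is unavailable.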
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.430293 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.430326 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430295 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430433 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430503 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430613 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430673 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430733 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430788 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430845 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430897 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.430948 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.431053 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.431154 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.431300 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:35 crc kubenswrapper[3552]: E0320 15:25:35.431469 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.529680 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:35 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:35 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:35 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:35 crc kubenswrapper[3552]: I0320 15:25:35.529795 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430493 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430532 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430617 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430666 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.430698 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430708 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430756 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430786 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430837 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.430847 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430839 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430871 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430920 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430925 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.430978 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431006 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431009 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431034 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431175 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431176 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431176 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431235 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431282 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431326 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431327 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431461 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431532 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431637 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431682 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431741 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431799 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.431898 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431935 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.431995 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432058 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.432093 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432183 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432281 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432377 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.432471 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.432515 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432572 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432599 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.432608 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432696 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432626 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.432742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432808 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.432899 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.432962 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433039 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433114 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433191 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433241 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433311 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433383 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433585 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:36 crc kubenswrapper[3552]: E0320 15:25:36.433688 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.528898 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:36 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:36 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:36 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:36 crc kubenswrapper[3552]: I0320 15:25:36.528979 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430599 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430647 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430748 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430790 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430819 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430821 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430871 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430882 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430910 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430906 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430845 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430930 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.431125 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430830 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.430944 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.431392 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.431665 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.431890 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.431959 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.432165 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.432355 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.432441 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.432578 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.432813 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.432917 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433095 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433129 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433215 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433296 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433420 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433451 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:37 crc kubenswrapper[3552]: E0320 15:25:37.433521 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.529515 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:37 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:37 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:37 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.529593 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:37 crc kubenswrapper[3552]: I0320 15:25:37.880364 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-wwpnd" event={"ID":"2b6d14a5-ca00-40c7-af7a-051a98a24eed","Type":"ContainerStarted","Data":"2a49adf6ca223df88892c54722a98cd7b5a3c1af58e96afce0b103fad40ed393"} Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087034 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087119 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087165 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087197 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087281 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087334 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087382 3552 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087369431 +0000 UTC m=+53.781066261 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087479 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087441143 +0000 UTC m=+53.781138003 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087298 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087566 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087546566 +0000 UTC m=+53.781243436 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087608 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087639 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087656 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087643919 +0000 UTC m=+53.781340969 (durationBeforeRetry 16s). 
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087719 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087781 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.087806 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087830 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087816383 +0000 UTC m=+53.781513223 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087855 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087891 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087900 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087885695 +0000 UTC m=+53.781582715 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.087938 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.087925156 +0000 UTC m=+53.781622016 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.189946 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190002 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190037 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190062 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190130 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190112003 +0000 UTC m=+53.883808833 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190071 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190145 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190191 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190181955 +0000 UTC m=+53.883878785 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190192 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190213 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190226 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190231 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190251 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190275 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190302 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190296 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190232 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190361 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190369 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190313 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190337 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190340 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190329579 +0000 UTC m=+53.884026409 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190305 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190560 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190523224 +0000 UTC m=+53.884220084 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190604 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190588576 +0000 UTC m=+53.884285436 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190653 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190634527 +0000 UTC m=+53.884331387 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190694 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190682618 +0000 UTC m=+53.884379478 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190733 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190717589 +0000 UTC m=+53.884414459 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190775 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.19075839 +0000 UTC m=+53.884455250 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190864 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190957 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.190969 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.190989 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.190979986 +0000 UTC m=+53.884676816 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.191037 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.191126 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.191142 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.191172 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.191162161 +0000 UTC m=+53.884858991 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.191683 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.191665455 +0000 UTC m=+53.885362315 (durationBeforeRetry 16s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.192034 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.192151 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.192201 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.192189189 +0000 UTC m=+53.885886279 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.192440 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.192632 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.192711 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.192695452 +0000 UTC m=+53.886392322 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.293665 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.293749 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.293790 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.293857 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.293971 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.293978 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.293996 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294001 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.293987 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294068 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered
"openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294093 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294124 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294075 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294163 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294145189 +0000 UTC m=+53.987842019 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294095 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294013 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294011 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294185 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.29417623 +0000 UTC m=+53.987873060 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294297 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294335 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294371 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294399 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294381716 +0000 UTC m=+53.988078566 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294429 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294432 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294422967 +0000 UTC m=+53.988119797 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294444 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294453 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294444397 +0000 UTC m=+53.988141227 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294534 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294565 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294587 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294597 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294633 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294621322 +0000 UTC m=+53.988318322 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294643 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294660 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294650953 +0000 UTC m=+53.988348033 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294682 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294671444 +0000 UTC m=+53.988368504 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294680 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294730 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294718285 +0000 UTC m=+53.988415315 (durationBeforeRetry 16s). 
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294733 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294773 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294778 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294810 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294846 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294895 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294922 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.29490616 +0000 UTC m=+53.988602990 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294958 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294973 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.294969 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.294998 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295007 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.294998302 +0000 UTC m=+53.988695132 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295022 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295038 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295047 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295057 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295065 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295079 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295068974 +0000 UTC m=+53.988765804 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295118 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295064 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295131 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295119296 +0000 UTC m=+53.988816126 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295181 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295170737 +0000 UTC m=+53.988867777 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295230 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295220598 +0000 UTC m=+53.988917428 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295239 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295247 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295239599 +0000 UTC m=+53.988936429 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295313 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295418 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295384173 +0000 UTC m=+53.989080993 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295426 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295469 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295459095 +0000 UTC m=+53.989156095 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295490 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295503 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295508 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295518 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295543 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295552 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295541297 +0000 UTC m=+53.989238307 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295582 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295615 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295667 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295684 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295687 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295690 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295693 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295724 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295713591 +0000 UTC m=+53.989410431 (durationBeforeRetry 16s).
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295744 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295735202 +0000 UTC m=+53.989432032 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295759 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295751202 +0000 UTC m=+53.989448243 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295772 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295784 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295816 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295832 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295819924 +0000 UTC m=+53.989516914 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295868 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295883 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.295903 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.295892756 +0000 UTC m=+53.989589746 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295931 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295959 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.295989 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296012 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296029 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296056 3552 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.29604959 +0000 UTC m=+53.989746420 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296029 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296073 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296076 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296065281 +0000 UTC m=+53.989762201 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296061 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296100 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296090602 +0000 UTC m=+53.989787672 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296114 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296107222 +0000 UTC m=+53.989804262 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296139 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296143 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296181 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296187 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296202 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296214 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296219 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296207645 +0000 UTC m=+53.989904475 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296264 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296278 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296296 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296286337 +0000 UTC m=+53.989983357 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296323 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296359 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296362 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296380 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296383 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296395 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: 
\"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296422 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296434 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296447 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296386689 +0000 UTC m=+53.990083699 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296466 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296457421 +0000 UTC m=+53.990154251 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296469 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296469 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296530 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296475102 +0000 UTC m=+53.990172152 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296584 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296607 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296646 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296633596 +0000 UTC m=+53.990330426 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296659 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296666 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296655597 +0000 UTC m=+53.990352537 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296682 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296674277 +0000 UTC m=+53.990371107 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296698 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296690588 +0000 UTC m=+53.990387418 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296729 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296739 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296798 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.29678704 +0000 UTC m=+53.990484050 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296816 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296831 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296840 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296827 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296871 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296862452 +0000 UTC m=+53.990559472 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296905 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296940 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.296975 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.296941 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297000 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297017 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297028 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297028 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.296963595 +0000 UTC m=+53.990660425 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297076 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297064628 +0000 UTC m=+53.990761678 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297109 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297146 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297147 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297162 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297185 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297197 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297185701 +0000 UTC m=+53.990882691 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297226 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297241 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297263 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297271 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297287 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297296 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297316 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297276 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297266273 +0000 UTC m=+53.990963243 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297328 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297344 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297351 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297353 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297347 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297337605 +0000 UTC m=+53.991034645 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297417 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297420 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297394777 +0000 UTC m=+53.991091607 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297431 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297438 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297440 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297432198 +0000 UTC m=+53.991129028 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297469 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297459478 +0000 UTC m=+53.991156318 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297479 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297488 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297476829 +0000 UTC m=+53.991173659 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297520 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297546 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297559 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297547471 +0000 UTC m=+53.991244301 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297587 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297603 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297625 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297635 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:54.297626073 +0000 UTC m=+53.991322903 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297661 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297689 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297698 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297705 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297715 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297733 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297740 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297762 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297747 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297735746 +0000 UTC m=+53.991432756 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297783 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297793 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297792 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297847 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297801 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297791447 +0000 UTC m=+53.991488277 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.297804 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297939 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.29790921 +0000 UTC m=+53.991606080 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.297977 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.297961862 +0000 UTC m=+53.991658722 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298021 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298000893 +0000 UTC m=+53.991697763 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298056 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298043384 +0000 UTC m=+53.991740254 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298093 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298081875 +0000 UTC m=+53.991778745 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298158 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298217 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298259 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298300 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.2982893 +0000 UTC m=+53.991986290 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298327 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298346 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298388 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298457 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298377903 +0000 UTC m=+53.992074893 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298481 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298489 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298507 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298519 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298520 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298509366 +0000 UTC m=+53.992206396 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298529 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298554 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298543977 +0000 UTC m=+53.992241017 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298594 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298640 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298646 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298660 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298680 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298668831 +0000 UTC m=+53.992365841 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298702 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298693901 +0000 UTC m=+53.992390731 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298730 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298754 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298764 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298829 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298822065 +0000 UTC m=+53.992518895 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298836 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298878 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298866116 +0000 UTC m=+53.992563096 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.298877 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.298987 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299007 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.298990549 +0000 UTC m=+53.992687509 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299056 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299093 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299082922 +0000 UTC m=+53.992779912 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299094 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299146 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299181 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299200 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299190565 +0000 UTC m=+53.992887585 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299228 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299237 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299266 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299273 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299260156 +0000 UTC m=+53.992956986 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299320 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299335 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299358 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299366 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299356709 +0000 UTC m=+53.993053529 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299398 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299435 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299451 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299452 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299461 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object 
"openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299466 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299473 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299497 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299487923 +0000 UTC m=+53.993184753 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299518 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299508413 +0000 UTC m=+53.993205243 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299529 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299561 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299534634 +0000 UTC m=+53.993231504 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299485 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299581 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299592 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299594 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299594 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299579355 +0000 UTC m=+53.993276215 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299608 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299624 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299654 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299657 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:54.299645107 +0000 UTC m=+53.993342157 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299699 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299735 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299727039 +0000 UTC m=+53.993424099 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299738 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299782 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299816 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299847 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299847 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299878 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299874 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299912 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299921 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.299948 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299978 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.299960265 +0000 UTC m=+53.993657135 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300014 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300033 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300088 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300098 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300116 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300125 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300137 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300151 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300159 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300171 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300140 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: 
\"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300190 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300201 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300207 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300221 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.299610 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300294 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300161 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.30014971 +0000 UTC m=+53.993846740 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300372 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300361186 +0000 UTC m=+53.994058026 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300034 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300435 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300426718 +0000 UTC m=+53.994123568 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300447 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300457 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300447428 +0000 UTC m=+53.994144278 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300477 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300468259 +0000 UTC m=+53.994165099 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300478 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300209 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300498 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300490629 +0000 UTC m=+53.994187479 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300506 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300246 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300524 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.30050882 +0000 UTC m=+53.994205670 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300534 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300546 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300535871 +0000 UTC m=+53.994232711 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300568 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300558301 +0000 UTC m=+53.994255141 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300548 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300422 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300257 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300618 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:54.300607973 +0000 UTC m=+53.994304813 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300708 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300726 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300708585 +0000 UTC m=+53.994405435 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300766 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300805 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300791117 +0000 UTC m=+53.994488117 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300769 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300835 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300856 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300875 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300865079 +0000 UTC m=+53.994561929 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300904 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300924 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300937 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300928541 +0000 UTC m=+53.994625531 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300965 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.300975 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.300993 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301008 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.300999183 +0000 UTC m=+53.994696023 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301038 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301070 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301062625 +0000 UTC m=+53.994759655 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301042 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301128 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301155 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301163 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301152677 +0000 UTC m=+53.994849527 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301077 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301191 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301203 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301195028 +0000 UTC m=+53.994891868 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301234 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301252 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301286 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301276781 +0000 UTC m=+53.994973821 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301327 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301336 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301359 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301350812 +0000 UTC m=+53.995047652 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301392 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301394 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301458 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301465 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301502 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301513 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301517 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301531 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301514 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301503467 +0000 UTC m=+53.995200507 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301574 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301562378 +0000 UTC m=+53.995259428 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301575 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301579 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301594 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301606 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301620 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301608439 +0000 UTC m=+53.995305289 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301606 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301641 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. 
No retries permitted until 2026-03-20 15:25:54.30163071 +0000 UTC m=+53.995327550 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301654 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301671 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301693 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301683931 +0000 UTC m=+53.995380971 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301710 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301723 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301741 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301733173 +0000 UTC m=+53.995430013 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301773 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301800 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301818 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301827 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301829 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301857 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301847696 +0000 UTC m=+53.995544526 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301889 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301931 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.301942 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301964 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301955179 +0000 UTC m=+53.995652019 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301967 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301989 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.301979029 +0000 UTC m=+53.995675869 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.301890 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.302012 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.30200245 +0000 UTC m=+53.995699300 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.302041 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.302029851 +0000 UTC m=+53.995726891 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.302052 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.302085 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.302076162 +0000 UTC m=+53.995773192 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.403126 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.403180 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403360 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403447 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403465 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403477 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403508 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403524 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403546 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.403521489 +0000 UTC m=+54.097218519 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.403604 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.403583691 +0000 UTC m=+54.097280711 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.430363 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.430558 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.430617 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.430700 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.430740 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.430810 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.430847 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.430917 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.430951 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431007 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431043 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431105 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431137 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431201 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431234 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431289 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431322 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431375 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431428 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431553 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431585 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431640 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431668 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431720 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431757 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431825 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431855 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431909 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.431940 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.431998 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432088 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432122 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432196 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432231 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432297 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432347 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432450 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432498 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432572 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432616 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432672 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432701 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432756 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432790 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432851 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432884 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.432938 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.432972 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.433033 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.433066 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.433117 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.433148 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.433224 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.433260 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.433314 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.433356 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.433441 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.506379 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.506492 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506557 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.506572 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506584 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506598 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506702 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506722 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506732 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506730 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object 
"openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506770 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506777 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.506758844 +0000 UTC m=+54.200455674 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506785 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506797 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.506790115 +0000 UTC m=+54.200486945 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.506853 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.506833386 +0000 UTC m=+54.200530216 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.529537 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:38 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:38 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:38 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.529630 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.608995 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:38 crc kubenswrapper[3552]: I0320 15:25:38.609052 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609321 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609373 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609387 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609396 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609457 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609471 3552 
projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609498 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.609475525 +0000 UTC m=+54.303172345 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:38 crc kubenswrapper[3552]: E0320 15:25:38.609540 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:25:54.609519716 +0000 UTC m=+54.303216546 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.429851 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.429904 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430013 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430025 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.429866 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430007 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430071 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430104 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430116 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430150 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430227 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430269 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430273 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430307 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430360 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430504 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430570 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430648 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430707 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.430904 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.430916 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431001 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.431064 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431134 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431161 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.431212 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431291 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.431324 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431456 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431530 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431654 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431763 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.431841 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:39 crc kubenswrapper[3552]: E0320 15:25:39.432066 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.530503 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:39 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:39 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:39 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:39 crc kubenswrapper[3552]: I0320 15:25:39.530651 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430367 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430483 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430674 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.430799 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430819 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430915 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430917 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.430955 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431130 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431229 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.431208 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431237 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.431358 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431445 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431463 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431500 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431517 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.431590 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431642 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.431883 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.431946 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.432091 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.432178 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.432268 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.432294 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.432514 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.432702 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.432767 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.432861 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.432898 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.432948 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.432950 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433015 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433055 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433120 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433172 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433300 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433338 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433391 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433475 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433394 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433439 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433550 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433673 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433726 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.433846 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.433929 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434015 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434128 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.434176 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434306 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434469 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434574 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434698 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434788 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.434962 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.435071 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:40 crc kubenswrapper[3552]: E0320 15:25:40.435214 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.530247 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:40 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:40 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:40 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:40 crc kubenswrapper[3552]: I0320 15:25:40.530714 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.430198 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435529 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435587 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435634 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435686 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435688 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435633 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435733 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435747 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435598 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435799 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435591 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435883 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435910 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.435792 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.435888 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.435963 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.436186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.436311 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.436479 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.436790 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.436900 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437016 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437134 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437296 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.437333 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437452 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437606 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437762 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.437885 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.438497 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.439388 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.439527 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:41 crc kubenswrapper[3552]: E0320 15:25:41.440350 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.529765 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:41 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:41 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:41 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:41 crc kubenswrapper[3552]: I0320 15:25:41.529926 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.430090 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.430373 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.430461 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.430517 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.430747 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.430842 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.430950 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.431016 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.431049 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.431147 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.431151 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.431213 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.431212 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.431758 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.432104 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432181 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432280 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.432378 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.432726 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432800 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432813 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432884 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432918 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.432919 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.433023 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433091 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.433214 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433226 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433260 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.433354 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433370 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433467 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.433523 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.433692 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433719 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433790 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.433809 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.433952 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.434006 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.434045 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.434163 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.434232 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.434387 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.434578 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.434824 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.434831 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.434934 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.435129 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.435260 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.435511 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.435592 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.435714 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.435877 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.436113 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.436289 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.436462 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:42 crc kubenswrapper[3552]: E0320 15:25:42.436578 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.530363 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:42 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:42 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:42 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:42 crc kubenswrapper[3552]: I0320 15:25:42.530484 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430531 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430987 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430638 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430678 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.431111 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.431226 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430684 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.431324 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430716 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430831 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430831 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430860 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430871 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430885 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.431670 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430897 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.431795 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430947 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.431925 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430940 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.430563 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.431508 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432071 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432216 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432356 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432487 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432775 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432845 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.432991 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.433162 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.433276 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.433369 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.433491 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:43 crc kubenswrapper[3552]: E0320 15:25:43.433572 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.529980 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:43 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:43 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:43 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:43 crc kubenswrapper[3552]: I0320 15:25:43.530096 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429450 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429540 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429594 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429655 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429652 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429559 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429739 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429784 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.429809 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429819 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429816 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429853 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429909 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429956 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.429988 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.430035 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430056 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430071 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430103 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.430320 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430594 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.430686 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430712 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430869 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.430959 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.430871 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431187 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431390 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431514 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.431578 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431587 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431704 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.431705 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.431758 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431842 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.431896 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.431958 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.432016 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432079 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432146 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432225 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.432279 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432348 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432469 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432640 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432678 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432775 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.432827 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.432883 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.432901 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.433029 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.433151 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.433487 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.433511 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.433821 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.433933 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.434054 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:44 crc kubenswrapper[3552]: E0320 15:25:44.434161 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.530696 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:44 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:44 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:44 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:44 crc kubenswrapper[3552]: I0320 15:25:44.530862 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.429784 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.429853 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.429885 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.429928 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.429803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.429856 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.430047 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.430057 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.430153 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.430385 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.430467 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.430470 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.430541 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.430607 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.430731 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.430793 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.430912 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.431022 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.431084 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.431186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.431245 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.431328 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.431480 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.431625 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.431632 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.431670 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.431828 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.431951 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.432095 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.432229 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.432351 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.432488 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.432601 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:45 crc kubenswrapper[3552]: E0320 15:25:45.432721 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.529923 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:45 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:45 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:45 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:45 crc kubenswrapper[3552]: I0320 15:25:45.530040 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430182 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430372 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.430467 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430541 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430668 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.430691 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430731 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.430806 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430848 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430882 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430877 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430942 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.430850 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431102 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431118 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431161 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431226 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431343 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431398 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431497 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431532 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431617 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431710 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431789 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431821 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431870 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.431928 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.431957 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432015 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432076 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432134 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432192 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432221 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432294 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432323 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432378 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432459 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432491 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432544 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432608 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432685 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432742 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.432771 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432846 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.432943 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433025 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433112 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433196 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433278 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433352 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433459 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433525 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433585 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433640 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.433714 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.434516 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:46 crc kubenswrapper[3552]: E0320 15:25:46.434820 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.530158 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:46 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:46 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:46 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:46 crc kubenswrapper[3552]: I0320 15:25:46.530231 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.429951 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.429990 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430119 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.429956 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430460 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430519 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430626 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.430670 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430729 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430744 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.430845 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.430989 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.431028 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.431088 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.431164 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.431266 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.431469 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.431578 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.431755 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.431830 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.431952 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.432001 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.432078 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432165 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432324 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432446 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432561 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432666 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432781 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432878 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.432990 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.433089 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.433208 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:47 crc kubenswrapper[3552]: E0320 15:25:47.433368 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.529774 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:47 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:47 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:47 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:47 crc kubenswrapper[3552]: I0320 15:25:47.529853 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430582 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.431830 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430603 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.432099 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430614 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.432328 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430670 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.432568 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430687 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430739 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.432782 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430768 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430794 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430792 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430806 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430818 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430846 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430843 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433155 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430847 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430881 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430898 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433276 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430902 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430908 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430909 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430933 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433461 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430943 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430948 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430962 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433602 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430998 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.430997 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433769 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.431001 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.431004 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.431011 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433882 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.431137 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.433038 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.434027 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.434259 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.434585 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.434766 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.434948 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435074 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435228 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435337 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435488 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435590 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435732 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.435851 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.436002 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.436097 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.436199 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.436271 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:48 crc kubenswrapper[3552]: E0320 15:25:48.436340 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.529350 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:48 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:48 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:48 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:48 crc kubenswrapper[3552]: I0320 15:25:48.529440 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430355 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430467 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430563 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430669 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430712 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.430863 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430932 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430953 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431004 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431050 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431058 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431066 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431192 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.430642 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431028 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431079 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.431558 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.432034 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.432143 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.432330 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.432611 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.432824 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.433041 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.433286 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.433464 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.433596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.433700 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.433904 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.434035 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.434136 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.434318 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.434460 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.434574 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:49 crc kubenswrapper[3552]: E0320 15:25:49.434709 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.530035 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:49 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:49 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:49 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:49 crc kubenswrapper[3552]: I0320 15:25:49.530147 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.429342 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.429505 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.429684 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.429710 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.429757 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.429983 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.430014 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430049 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.430105 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430111 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430140 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430206 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.430437 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430473 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.430562 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430594 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430647 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.430706 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.430770 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.430887 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.431025 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431119 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431233 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.431362 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431476 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.431586 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431632 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431760 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.431930 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431943 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.431962 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432082 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432190 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432258 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432357 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.432526 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432562 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.432670 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.432722 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432815 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.432892 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.432928 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.432988 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.433004 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.432677 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.433090 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.433150 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.433248 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.433441 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.433474 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.433594 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.433717 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.434078 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.434436 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.434718 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.434823 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:50 crc kubenswrapper[3552]: E0320 15:25:50.434953 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.529979 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:50 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:50 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:50 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:50 crc kubenswrapper[3552]: I0320 15:25:50.530065 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.430188 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.430733 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.435564 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435603 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435659 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435737 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435773 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435810 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.435865 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435892 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436506 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435901 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435919 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436613 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435957 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.435967 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436718 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436841 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436962 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.437116 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.437282 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.437477 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.437590 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436072 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.436201 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.436260 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.436302 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.436340 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.437937 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.440762 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.441219 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:51 crc kubenswrapper[3552]: E0320 15:25:51.441511 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.529784 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:51 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:51 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:51 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:51 crc kubenswrapper[3552]: I0320 15:25:51.529942 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429673 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429762 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429684 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429867 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429856 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429959 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.429968 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430012 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430044 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430053 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430082 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429964 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430112 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.429769 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430116 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.430251 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430272 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430257 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430287 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430343 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430314 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430611 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.430797 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.430901 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.430969 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.431039 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.431062 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.431161 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.431363 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.431476 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.431553 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.431722 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.431864 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.431907 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.431925 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.431996 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.432104 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.432220 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.432251 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.432341 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.432531 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.432657 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.432754 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433007 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433031 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433065 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433217 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433293 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433384 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433547 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433680 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433884 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433899 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.433958 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.434043 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.434185 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:52 crc kubenswrapper[3552]: E0320 15:25:52.434329 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.529789 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:52 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:52 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:52 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:52 crc kubenswrapper[3552]: I0320 15:25:52.529918 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429724 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429764 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429809 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429771 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429886 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429948 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429965 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429756 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.429724 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.430119 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.430139 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.430216 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.430307 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.430473 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.430503 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.430622 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.430662 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.430889 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.430954 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.430978 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431105 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.431172 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431251 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431333 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431447 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.431502 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431553 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431681 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431806 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431889 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.431957 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.432049 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.434463 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:53 crc kubenswrapper[3552]: E0320 15:25:53.435335 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.529472 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:53 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:53 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:53 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:53 crc kubenswrapper[3552]: I0320 15:25:53.530247 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.102281 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.102803 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.104529 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.104741 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.104713756 +0000 UTC m=+85.798410616 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.104848 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.105053 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.105032225 +0000 UTC m=+85.798729095 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.105255 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.105387 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.105479 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.105463886 +0000 UTC m=+85.799160756 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.105717 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.106077 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.106430 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.105950 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.106797 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.106779802 +0000 UTC m=+85.800476662 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.106312 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.106648 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.106739 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.106994 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.106939806 +0000 UTC m=+85.800636676 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.107044 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.107022208 +0000 UTC m=+85.800719188 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.107653 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.108290 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.108258701 +0000 UTC m=+85.801955571 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.211997 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.212274 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.212592 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.212560634 +0000 UTC m=+85.906257504 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.213183 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.213288 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.213806 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.213742786 +0000 UTC m=+85.907439656 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.214341 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.214507 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.214592 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.214679 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.214748 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.214994 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.215071 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.215147 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.215255 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.215330 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.215454 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215551 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215613 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215658 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.215630117 +0000 UTC m=+85.909326987 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215679 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215703 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.215677018 +0000 UTC m=+85.909373888 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215711 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215782 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.21576758 +0000 UTC m=+85.909464440 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215789 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.215843 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.215867 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.215846222 +0000 UTC m=+85.909543092 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216170 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216226 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216227 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216211182 +0000 UTC m=+85.909908042 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216281 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216305 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216349 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216336776 +0000 UTC m=+85.910033646 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216386 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216392 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216447 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216470 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216455679 +0000 UTC m=+85.910152539 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216472 3552 projected.go:200] Error preparing data for projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216564 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216540151 +0000 UTC m=+85.910237071 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216598 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216584602 +0000 UTC m=+85.910281472 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216625 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216611923 +0000 UTC m=+85.910308793 (durationBeforeRetry 32s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.216656 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.216639894 +0000 UTC m=+85.910336764 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.217659 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.217947 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.217926578 +0000 UTC m=+85.911623448 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.318991 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319113 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319181 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319225 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319327 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.319300533 +0000 UTC m=+86.012997393 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319391 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319472 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319487 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319535 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319551 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.319529049 +0000 UTC m=+86.013225939 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319705 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319673 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319774 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319683 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319812 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319815 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.319782486 +0000 UTC m=+86.013479346 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319831 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319840 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319858 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.319838958 +0000 UTC m=+86.013535948 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319893 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319852 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319918 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.319946 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.319974 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.319953941 +0000 UTC m=+86.013650801 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320012 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.319985622 +0000 UTC m=+86.013682522 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320051 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320073 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320052303 +0000 UTC m=+86.013749383 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320105 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320087104 +0000 UTC m=+86.013784104 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320190 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320250 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320298 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320367 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320374 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320439 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320467 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320483 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320508 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320446 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320487 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320463194 +0000 UTC m=+86.014160064 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320580 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320588 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320566227 +0000 UTC m=+86.014263097 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320598 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320623 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320608848 +0000 UTC m=+86.014305718 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320651 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320634719 +0000 UTC m=+86.014331579 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320722 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320746 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320715391 +0000 UTC m=+86.014412381 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320800 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320849 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.320836444 +0000 UTC m=+86.014533304 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320855 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320909 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320943 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.320959 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320975 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.320997 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321009 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321059 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32103925 +0000 UTC m=+86.014736250 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321129 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321169 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321201 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321206 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321236 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321257 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321242165 +0000 UTC m=+86.014939025 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321267 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321307 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321282596 +0000 UTC m=+86.014979536 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321321 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321206 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321361 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321349258 +0000 UTC m=+86.015046128 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321134 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321397 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321454 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321470 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321497 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321518 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321537 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321560 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321547153 +0000 UTC m=+86.015244013 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321587 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321570084 +0000 UTC m=+86.015266944 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321631 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321650 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321668 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321700 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321602185 +0000 UTC m=+86.015299045 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321780 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321795 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32178104 +0000 UTC m=+86.015477910 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321834 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321902 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.321947 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.321933274 +0000 UTC m=+86.015630144 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.321995 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322045 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322119 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322188 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322232 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322274 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322324 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322368 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322446 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322501 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322551 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322597 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322642 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.322662 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322686 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322736 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.322751 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.322725905 +0000 UTC m=+86.016422815 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.322810 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322822 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.322853 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.322840008 +0000 UTC m=+86.016536868 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.322906 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322913 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.322942 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32293113 +0000 UTC m=+86.016628000 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.322982 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323019 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323086 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323065394 +0000 UTC m=+86.016762264 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323090 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323141 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323154 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323137466 +0000 UTC m=+86.016834426 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323184 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323171487 +0000 UTC m=+86.016868347 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323239 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323295 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323310 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32328954 +0000 UTC m=+86.016986520 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323346 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323328621 +0000 UTC m=+86.017025601 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323385 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323432 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323447 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323486 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323473655 +0000 UTC m=+86.017170515 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323523 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323543 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323579 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323568198 +0000 UTC m=+86.017265058 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-ca-bundle" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323626 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323663 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32365167 +0000 UTC m=+86.017348530 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323680 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323676 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323739 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323742 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323765 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323742252 +0000 UTC m=+86.017439192 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323780 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323794 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323245 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323812 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.323029 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323848 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323816 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323787353 +0000 UTC m=+86.017484223 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323871 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323873 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323886 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323872326 +0000 UTC m=+86.017569196 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323921 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323908967 +0000 UTC m=+86.017605837 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323954 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323933217 +0000 UTC m=+86.017630177 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.323987 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.323970998 +0000 UTC m=+86.017668008 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324055 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324104 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324131 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324135 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324155 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324170 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324211 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324188314 +0000 UTC m=+86.017885184 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324235 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324255 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324305 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324315 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324290587 +0000 UTC m=+86.017987557 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324321 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324353 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324334458 +0000 UTC m=+86.018031438 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324370 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324393 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324376429 +0000 UTC m=+86.018073409 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324473 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324450501 +0000 UTC m=+86.018147461 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324531 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324588 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324602 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324633 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324619096 +0000 UTC m=+86.018315956 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324679 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324721 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324675437 +0000 UTC m=+86.018372307 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324756 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324722 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324809 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324796 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32478464 +0000 UTC m=+86.018481510 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324847 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.324895 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.324868132 +0000 UTC m=+86.018565032 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.324950 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325078 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325126 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325175 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325218 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325237 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325265 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325315 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325361 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325439 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325469 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.325441778 +0000 UTC m=+86.019138648 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325527 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325545 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325581 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325605 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.325590232 +0000 UTC m=+86.019287102 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325656 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325726 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325747 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325769 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325785 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325793 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325842 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.325823758 +0000 UTC m=+86.019520628 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325870 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.325856749 +0000 UTC m=+86.019553619 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325894 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.325882029 +0000 UTC m=+86.019578889 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325842 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325927 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325951 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.325956 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325968 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326004 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326032 3552 nestedpendingoperations.go:348] Operation for
"{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326005323 +0000 UTC m=+86.019702193 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326073 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326084 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326123 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326127 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326114306 +0000 UTC m=+86.019811166 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326175 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326217 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326205858 +0000 UTC m=+86.019902728 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326182 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326240 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326283 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32627069 +0000 UTC m=+86.019967560 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326324 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326358 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326369 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326380 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326394 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326455 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326470 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326457465 +0000 UTC m=+86.020154325 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326512 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326525 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326571 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326557158 +0000 UTC m=+86.020254028 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326606 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326622 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326659 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326683 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326700 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326724 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326665 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32665043 +0000 UTC m=+86.020347300 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326783 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326800 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326784 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326831 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.326666 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326788 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326769183 +0000 UTC m=+86.020466173 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326905 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326891357 +0000 UTC m=+86.020588217 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326905 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326926 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326915347 +0000 UTC m=+86.020612207 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326903 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326951 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326940548 +0000 UTC m=+86.020637408 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326963 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326976 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326976 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.326964698 +0000 UTC m=+86.020661568 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327042 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327094 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327078852 +0000 UTC m=+86.020775722 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.325659 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327119 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327108212 +0000 UTC m=+86.020805072 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327146 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327134423 +0000 UTC m=+86.020831283 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327167 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327181 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327191 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327207 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327231 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326952 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327259 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327173 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327159394 +0000 UTC m=+86.020856254 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.326724 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327314 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327097 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327049 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327301 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327289037 +0000 UTC m=+86.020985907 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327357 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327376 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327379 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327365459 +0000 UTC m=+86.021062329 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327392 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327439 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32739302 +0000 UTC m=+86.021089890 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.327567 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327594 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327576525 +0000 UTC m=+86.021273385 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327620 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327608656 +0000 UTC m=+86.021305526 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327646 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327651 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327640217 +0000 UTC m=+86.021337077 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327688 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327676988 +0000 UTC m=+86.021373848 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327715 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327704078 +0000 UTC m=+86.021400938 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327750 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327734739 +0000 UTC m=+86.021431609 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327782 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32777137 +0000 UTC m=+86.021468230 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.327887 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.327947 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.327998 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.327968965 +0000 UTC m=+86.021665845 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328058 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328076 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328111 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:26:26.328098389 +0000 UTC m=+86.021795249 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328123 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328153 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328158 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328190 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.328174591 +0000 UTC m=+86.021871451 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328248 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328259 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.328203492 +0000 UTC m=+86.021900362 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328269 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328284 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328634 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328693 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328723 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328741 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328789 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.328774247 +0000 UTC m=+86.022471107 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328792 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328870 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328887 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328915 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32890026 +0000 UTC m=+86.022597120 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.328954 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328980 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329012 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329039 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.329026644 +0000 UTC m=+86.022723514 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329083 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329093 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329113 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329128 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329180 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.329165857 +0000 UTC m=+86.022862727 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329128 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329197 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329219 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329235 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329283 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.32926899 +0000 UTC m=+86.022965860 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329237 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329340 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329384 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.329372293 +0000 UTC m=+86.023069163 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329342 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329433 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329455 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329469 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329476 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329521 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329569 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329586 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329612 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329621 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: 
\"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329630 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329669 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329678 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.329664061 +0000 UTC m=+86.023360921 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329293 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329716 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329736 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.329720482 +0000 UTC m=+86.023417342 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329775 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329824 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329867 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329890 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329906 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329987 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.329941 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330026 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.33001424 +0000 UTC m=+86.023711110 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330049 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:26:26.330037961 +0000 UTC m=+86.023734821 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330074 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330062111 +0000 UTC m=+86.023758971 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330112 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330122 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330131 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330144 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330160 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330148704 +0000 UTC m=+86.023845564 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.328893 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330196 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330232 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330280 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330332 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330351 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330363 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330388 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330445 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330461 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330474 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.329890 3552 reconciler_common.go:231] "operationExecutor.MountVolume 
started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330186 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330171624 +0000 UTC m=+86.023868494 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330561 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330566 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330551935 +0000 UTC m=+86.024248805 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330592 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330580525 +0000 UTC m=+86.024277395 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330616 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330604706 +0000 UTC m=+86.024301576 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330707 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330694878 +0000 UTC m=+86.024391748 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330731 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.330720429 +0000 UTC m=+86.024417299 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330755 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.33074289 +0000 UTC m=+86.024439750 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.330776 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.33076511 +0000 UTC m=+86.024461970 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.330815 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.330937 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331041 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.331014727 +0000 UTC m=+86.024711587 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331120 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.331154 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331166 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.331153841 +0000 UTC m=+86.024850711 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331216 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331260 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. 
No retries permitted until 2026-03-20 15:26:26.331245393 +0000 UTC m=+86.024942363 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.331222 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331280 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331330 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.331316155 +0000 UTC m=+86.025013015 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331387 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.331471 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.331458769 +0000 UTC m=+86.025155639 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429691 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429709 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429801 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429827 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429890 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429825 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.429857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430018 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430029 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.430049 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430191 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.430291 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.430472 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430563 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430633 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430791 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430794 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430845 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.430904 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.430956 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.431039 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.431042 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.431253 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.431474 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.431545 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.438529 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.438627 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.438656 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.439614 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.440349 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.440600 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.440696 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.441145 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.438748 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.438811 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.441340 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.441399 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.441534 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.441729 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.441821 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442005 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.442082 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442247 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442492 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442600 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442725 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442754 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442803 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.438913 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442892 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.442982 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.438548 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.443087 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.443041717 +0000 UTC m=+86.136738607 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.443022 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.443123 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.443106599 +0000 UTC m=+86.136803469 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered] Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.442624 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.443185 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.444804 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.445191 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.445568 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.445854 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.446023 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.446280 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.446440 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.447614 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.447771 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.447863 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.530551 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:25:54 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:25:54 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:25:54 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.530879 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.542500 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.542557 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.543769 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.543839 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.543861 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.544173 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544268 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544300 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544317 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544363 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544376 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.544351651 +0000 UTC m=+86.238048511 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544383 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544430 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.544392742 +0000 UTC m=+86.238089592 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544448 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.544557 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.544485605 +0000 UTC m=+86.238182475 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.645974 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:54 crc kubenswrapper[3552]: I0320 15:25:54.646062 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646279 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646326 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646333 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646346 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646363 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646383 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646489 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.646462126 +0000 UTC m=+86.340158996 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:54 crc kubenswrapper[3552]: E0320 15:25:54.646524 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:26:26.646510307 +0000 UTC m=+86.340207177 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered]
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429693 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429738 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429795 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429829 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429851 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429875 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429748 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429919 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429949 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429840 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429960 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429751 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429706 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429759 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429784 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429908 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.430482 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.430514 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.429996 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.430145 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.430312 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.430589 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.431093 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.431338 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.431549 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.431629 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.431763 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432009 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432048 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432136 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432202 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432260 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432314 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 20 15:25:55 crc kubenswrapper[3552]: E0320 15:25:55.432503 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.529192 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:25:55 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:25:55 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:25:55 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.529358 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:25:55 crc kubenswrapper[3552]: I0320 15:25:55.962027 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429468 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429923 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429573 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429971 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429588 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429633 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429640 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429640 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429644 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429665 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429700 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429686 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429692 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.430494 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.430592 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429729 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.430766 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429744 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429753 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429768 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429767 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429797 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429829 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.429846 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.430467 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.431332 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.431364 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.431477 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.431595 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.431737 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.431803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432018 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432132 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432245 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432342 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432510 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432636 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432771 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.432888 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.432927 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.432931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433035 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433129 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433206 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433387 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433478 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433716 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.433998 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.434202 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.434377 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.434663 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.434800 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.434944 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.435077 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.435168 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.435194 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.435297 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 20 15:25:56 crc kubenswrapper[3552]: E0320 15:25:56.435387 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19"
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.529870 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:25:56 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:25:56 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:25:56 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:25:56 crc kubenswrapper[3552]: I0320 15:25:56.530016 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431198 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431269 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431218 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431481 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.431493 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431379 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431566 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431628 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431647 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431714 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431778 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.431817 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431881 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433520 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.431980 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.432189 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433629 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.432218 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.432241 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.432510 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.432536 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.432893 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.432956 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433055 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433198 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433284 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433858 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433450 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.433970 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.434089 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.434234 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 20 15:25:57 crc kubenswrapper[3552]: E0320 15:25:57.434345 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.530162 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:25:57 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:25:57 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:25:57 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:25:57 crc kubenswrapper[3552]: I0320 15:25:57.530281 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429539 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429579 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429632 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429695 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429863 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.429892 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429925 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.429989 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.430104 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.430212 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.430259 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.430326 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.430395 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.430464 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.430573 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.430616 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.430629 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.430704 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.431034 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.431277 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.431440 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.431541 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.431682 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.431765 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.431887 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.431944 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.432024 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432168 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.432276 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432456 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.432534 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432660 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432728 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432813 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432911 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.432931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.432919 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.433083 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.433461 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.433102 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.433133 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.433666 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915"
Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.433161 3552 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.433682 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.433230 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.433312 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.433970 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434088 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434246 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434308 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434506 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434572 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434640 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434738 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434866 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.434969 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:25:58 crc kubenswrapper[3552]: E0320 15:25:58.435029 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.529743 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:58 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:58 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:58 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.530230 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:25:58 crc kubenswrapper[3552]: I0320 15:25:58.776164 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" probeResult="failure" output="" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.430716 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.431021 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.431124 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.431195 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.431029 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.432182 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432242 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432509 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.432658 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432675 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432863 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.433154 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.433186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432716 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.433265 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.432731 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.433330 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.433542 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.433712 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.433848 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.433944 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.434076 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.434226 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.434333 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.434499 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.434665 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.434884 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.435053 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.435125 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.435225 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.435331 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.436538 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:25:59 crc kubenswrapper[3552]: E0320 15:25:59.437070 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.529656 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:25:59 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:25:59 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:25:59 crc kubenswrapper[3552]: healthz check failed Mar 20 15:25:59 crc kubenswrapper[3552]: I0320 15:25:59.530210 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.430760 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431133 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431211 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431222 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431278 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431364 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431142 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431372 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431455 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431498 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.430888 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431041 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.431602 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431084 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431607 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431660 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431734 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.430824 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431706 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.431639 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.432010 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432030 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432206 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.432241 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432329 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432476 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432600 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.432617 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.432645 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.432714 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432840 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.432963 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433093 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433286 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.433304 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.433323 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433536 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433634 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.433688 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433718 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433787 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.433955 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434093 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434231 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434460 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434633 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434764 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434882 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.434981 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.435098 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.435197 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.435303 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.435437 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.435545 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:00 crc kubenswrapper[3552]: E0320 15:26:00.435643 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.528930 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:00 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:00 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:00 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:00 crc kubenswrapper[3552]: I0320 15:26:00.528999 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.266125 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.266269 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.266338 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.266384 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.266505 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.430350 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.430387 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.430451 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.430500 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.432362 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.432771 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433040 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433040 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433158 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433226 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433252 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433290 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433326 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433356 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433388 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433390 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433423 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433441 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433513 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433524 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433543 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433569 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433609 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.433076 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433823 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433818 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433758 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433883 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.433964 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.434088 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.434128 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.434194 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.434279 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:01 crc kubenswrapper[3552]: E0320 15:26:01.434367 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.532173 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:01 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:01 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:01 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:01 crc kubenswrapper[3552]: I0320 15:26:01.532254 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429706 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429737 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429772 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429841 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429907 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429922 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430008 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430023 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430054 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429917 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430023 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430066 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430129 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430139 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.429791 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430171 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.430116 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.430310 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430103 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430350 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430372 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430319 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430466 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.430490 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430560 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430681 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.430758 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430776 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.430742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.431002 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.431044 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.432075 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.432189 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.432213 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.432130 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.432327 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.432696 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.432761 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.432872 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.433013 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.433176 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.433320 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.434382 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.434565 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.434648 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.434670 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.434895 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.435022 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.435304 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.435395 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.435535 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.435723 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.435890 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.436009 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.436206 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.436287 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.436441 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:02 crc kubenswrapper[3552]: E0320 15:26:02.436599 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.529758 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:02 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:02 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:02 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:02 crc kubenswrapper[3552]: I0320 15:26:02.529865 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.429725 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.429844 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.429924 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.429975 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.429995 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.430043 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430065 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430098 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430173 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430124 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430215 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430245 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430217 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430631 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.430661 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.430875 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430904 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.430991 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.431014 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.431035 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.431249 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.431368 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.431561 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.431955 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.432247 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.432304 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.432592 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.432967 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.433026 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.433149 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.433373 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.433565 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.433714 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:03 crc kubenswrapper[3552]: E0320 15:26:03.433861 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.529863 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:03 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:03 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:03 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:03 crc kubenswrapper[3552]: I0320 15:26:03.529951 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430388 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430467 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.430699 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430768 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430798 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430831 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430778 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430859 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.430768 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.431060 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.431628 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.431832 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.431962 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432049 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.431982 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.432150 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.432299 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432302 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432389 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432388 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432470 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432489 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.432572 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432641 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432652 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432677 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.432721 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432792 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.435554 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432842 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.432922 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.432984 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.433008 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.433015 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433135 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.433217 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433310 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433394 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.433489 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.433495 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433590 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433686 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.433763 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433834 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.433978 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.434124 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.434276 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.434816 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436163 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436258 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436456 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436558 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436713 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436900 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.436980 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.437139 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.437276 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:04 crc kubenswrapper[3552]: E0320 15:26:04.438291 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.530327 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:04 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:04 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:04 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:04 crc kubenswrapper[3552]: I0320 15:26:04.530485 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429696 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429778 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429836 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429853 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429920 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429941 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.429901 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430054 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430128 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430059 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430158 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430168 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.430313 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430443 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.430864 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.430980 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431122 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431122 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431244 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431397 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431611 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.431659 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.431720 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431774 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.431931 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432122 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432136 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432233 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432391 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432506 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432655 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.432835 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:05 crc kubenswrapper[3552]: E0320 15:26:05.433019 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.530343 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:05 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:05 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:05 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:05 crc kubenswrapper[3552]: I0320 15:26:05.530457 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430022 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430107 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430185 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430547 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.430596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430623 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430673 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430700 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.430757 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.431018 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431056 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431123 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431144 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431066 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431126 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.431253 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431150 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.431472 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431509 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431563 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431606 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.431711 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431816 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.431899 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.431947 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.432043 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.432125 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.432169 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.432286 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.432367 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.432542 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.432663 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.432847 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.432944 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.433088 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.433171 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.433215 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.433231 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.433334 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.433531 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.433584 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.433720 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.433761 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.433931 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.434017 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.434154 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.434268 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.434482 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.434656 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.434827 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435142 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435196 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435304 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435527 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435703 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435739 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:06 crc kubenswrapper[3552]: E0320 15:26:06.435839 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.529389 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:06 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:06 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:06 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:06 crc kubenswrapper[3552]: I0320 15:26:06.529524 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429570 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.430371 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429568 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.430778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429628 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.430821 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.430781 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429624 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.430901 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429699 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.430940 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429717 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.430979 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431040 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429762 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.431096 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429768 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.431150 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.429795 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431241 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431298 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431650 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431712 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431844 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.431947 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.432060 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.432192 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.432354 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.432660 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.432764 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.432943 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.433065 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.433213 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:07 crc kubenswrapper[3552]: E0320 15:26:07.433291 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.529253 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:07 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:07 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:07 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:07 crc kubenswrapper[3552]: I0320 15:26:07.529373 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430558 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430619 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430571 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430624 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430797 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430912 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430918 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430797 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430983 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431024 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430808 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.430571 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431395 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431513 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431522 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.431583 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.431702 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431723 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431723 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431781 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431785 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431896 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.431905 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.431930 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.432129 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.432160 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.432195 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.432232 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.432333 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.432384 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.432502 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.432846 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.432963 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.432988 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433107 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433167 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433246 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433521 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433527 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433681 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433772 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.433860 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.433868 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.433983 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434094 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.434140 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434312 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434451 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434567 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434620 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434730 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434808 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434873 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.434962 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.435024 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.435121 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.435184 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:08 crc kubenswrapper[3552]: E0320 15:26:08.435238 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.530127 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:08 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:08 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:08 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:08 crc kubenswrapper[3552]: I0320 15:26:08.530592 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.430019 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.430199 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.430287 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.430361 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.430815 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.431257 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.431322 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.431331 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.431349 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.431551 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.431582 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.431616 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.431848 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.431869 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.432008 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.432103 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.432154 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.432269 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.432444 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.432532 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.432570 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.432695 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.432774 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.432700 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.432965 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433050 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433196 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433275 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433362 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.433437 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433541 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433619 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433706 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:09 crc kubenswrapper[3552]: E0320 15:26:09.433817 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.530023 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:09 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:09 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:09 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:09 crc kubenswrapper[3552]: I0320 15:26:09.530395 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.429780 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.429850 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.430816 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.429896 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.429913 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.429958 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.429975 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430016 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430063 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430074 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430083 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430134 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430147 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430180 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430196 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430200 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430199 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430206 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430218 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430236 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430237 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430255 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430271 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430269 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430269 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430308 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430333 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430360 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.430380 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.432075 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.432319 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.432393 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.432552 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.432746 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.432986 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433147 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433325 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433482 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433619 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433721 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433815 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.433942 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434050 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434239 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434386 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434556 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434637 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434774 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.434938 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.435102 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.435280 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.435425 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.435591 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.435791 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.435912 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.436106 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:10 crc kubenswrapper[3552]: E0320 15:26:10.436258 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.529769 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:10 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:10 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:10 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.529876 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.996966 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/6.log" Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.997038 3552 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="f403c1698de4def7aadd43b02d3a259f8649cf04c13ee0e528df40e0215d2870" exitCode=1 Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.997071 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"f403c1698de4def7aadd43b02d3a259f8649cf04c13ee0e528df40e0215d2870"} Mar 20 15:26:10 crc kubenswrapper[3552]: I0320 15:26:10.997639 3552 scope.go:117] "RemoveContainer" containerID="f403c1698de4def7aadd43b02d3a259f8649cf04c13ee0e528df40e0215d2870" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430248 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430699 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430731 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430286 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430779 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430396 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430453 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430807 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430485 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430518 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430831 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430573 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430642 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.430902 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.436776 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.436893 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.436943 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.436978 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.437059 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.437159 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.437708 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.437834 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.437964 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438075 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438197 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438325 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438484 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438622 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438784 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.438911 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.439023 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.439163 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:11 crc kubenswrapper[3552]: E0320 15:26:11.439500 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.529868 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:11 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:11 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:11 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:11 crc kubenswrapper[3552]: I0320 15:26:11.529966 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.003226 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/6.log" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.003289 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"c9e1f7b73e2a30435d1c71c755106c2fc6a4afcec0814197636221fc59b957df"} Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430160 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430196 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430241 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430287 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430218 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430336 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430237 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430346 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430341 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430483 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430684 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.430698 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430695 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430721 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430754 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430774 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430794 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.430879 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.430880 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431026 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.431034 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431099 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.431131 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431161 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431184 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431204 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431243 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431240 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431251 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431379 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.431384 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431446 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.431497 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.431519 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.431720 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.431924 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432001 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.432038 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432118 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432236 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432331 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432557 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432622 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432634 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432692 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432839 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432906 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.432984 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433210 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433356 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433381 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433477 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433600 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433710 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433792 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433836 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433886 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:12 crc kubenswrapper[3552]: E0320 15:26:12.433997 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.529514 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:12 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:12 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:12 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:12 crc kubenswrapper[3552]: I0320 15:26:12.529613 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430385 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430501 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430575 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430644 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430664 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430690 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430705 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430757 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.430828 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430655 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430879 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.430926 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.431032 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.431054 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.431059 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.431187 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.431292 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.431338 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.431336 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.431452 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.431491 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.431548 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.431711 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.431931 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432081 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432284 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432460 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432576 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432663 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432835 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.432970 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.433108 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.433236 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:13 crc kubenswrapper[3552]: E0320 15:26:13.433381 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.530468 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:13 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:13 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:13 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:13 crc kubenswrapper[3552]: I0320 15:26:13.531519 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430595 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430652 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430676 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430696 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430668 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430874 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430604 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430625 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430911 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430914 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430982 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430595 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431022 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430643 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431056 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430939 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431084 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431094 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430966 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431128 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.431025 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430915 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431167 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.430847 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431229 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431162 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431179 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431270 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431193 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.431211 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.431646 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.431736 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.432332 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.432372 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.432691 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.432820 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.432959 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433068 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433175 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433266 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433364 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433503 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433589 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433727 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433799 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433880 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.433987 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434098 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434248 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434335 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434430 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434627 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434739 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434791 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434838 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.434947 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:14 crc kubenswrapper[3552]: E0320 15:26:14.435048 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.530197 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:14 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:14 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:14 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:14 crc kubenswrapper[3552]: I0320 15:26:14.530321 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430394 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430518 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430467 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430540 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430608 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430799 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.430815 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.430890 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431148 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431171 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431190 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431386 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431478 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431397 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431510 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431579 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431531 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431685 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431695 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431758 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431809 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.431870 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.431994 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.432167 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.432238 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.432302 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.432555 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.432903 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.433022 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.432936 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.433091 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.433259 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:15 crc kubenswrapper[3552]: E0320 15:26:15.433313 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.530110 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:15 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:15 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:15 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:15 crc kubenswrapper[3552]: I0320 15:26:15.530641 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430214 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430264 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.430470 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430483 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430678 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.430807 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430827 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.430976 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431073 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431092 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431108 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431168 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431234 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431248 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431301 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431249 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431341 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431474 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431491 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431634 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431692 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431763 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431770 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431805 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.431842 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431881 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431910 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.431983 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432060 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432137 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432218 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432270 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432361 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432385 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432426 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432621 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432659 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432710 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432758 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432810 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.432833 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.432998 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433047 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433347 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433379 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433514 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433646 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433799 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433828 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.433916 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.434036 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.434146 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.434274 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.434432 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:16 crc kubenswrapper[3552]: E0320 15:26:16.434524 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.530693 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:16 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:16 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:16 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:16 crc kubenswrapper[3552]: I0320 15:26:16.530802 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.430869 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431336 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.431362 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431578 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431680 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.431840 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431879 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431911 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431925 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431962 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.431981 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432014 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432032 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432011 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432073 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432131 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432107 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.432168 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.432197 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.432303 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.432563 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.432837 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.432925 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433144 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433326 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433539 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433609 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433735 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433934 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.433967 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.434117 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.434291 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:17 crc kubenswrapper[3552]: E0320 15:26:17.434358 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.529694 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:17 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:17 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:17 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:17 crc kubenswrapper[3552]: I0320 15:26:17.529796 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430493 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430604 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430696 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430727 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430773 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430715 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430852 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430864 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430751 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.430897 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430935 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430956 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430971 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431004 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431019 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431037 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430963 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430577 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431071 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431070 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430719 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431122 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431118 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431004 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431183 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431072 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430873 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430543 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.430750 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.431375 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.431639 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.431679 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.431825 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.431907 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.432134 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.432356 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.432542 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.432677 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.432823 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.432966 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433096 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433273 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433341 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433500 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433516 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433634 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433699 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433779 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.433980 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434034 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434168 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434315 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434556 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434574 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434630 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434739 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434815 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:18 crc kubenswrapper[3552]: E0320 15:26:18.434907 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.530981 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:18 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:18 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:18 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:18 crc kubenswrapper[3552]: I0320 15:26:18.531129 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430545 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430695 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430645 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430735 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430826 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430850 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430893 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430736 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430918 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430929 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430864 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430974 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.430981 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.430980 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.431079 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.431123 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.431229 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.431278 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.431331 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.431828 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.431937 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.432067 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.432219 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.432397 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.432638 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.432798 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.432942 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433122 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433230 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433370 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433577 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433751 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433777 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:19 crc kubenswrapper[3552]: E0320 15:26:19.433858 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.529329 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:19 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:19 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:19 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:19 crc kubenswrapper[3552]: I0320 15:26:19.529483 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430304 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430389 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430492 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430520 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430520 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430575 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430651 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430391 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430856 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.430894 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431239 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431313 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431349 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431377 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.431461 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.431467 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.431496 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.431627 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431736 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431735 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431810 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431874 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431879 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431905 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431923 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.431941 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431982 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.431988 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432020 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432072 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.432089 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.432123 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.432139 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432205 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.432278 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432378 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432544 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432652 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432761 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432875 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.432965 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.433005 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433136 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433302 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433354 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433440 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.433485 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433532 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433725 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.433951 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434093 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434168 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434277 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434484 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434575 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434742 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:20 crc kubenswrapper[3552]: E0320 15:26:20.434867 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.529472 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:20 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:20 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:20 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:20 crc kubenswrapper[3552]: I0320 15:26:20.529577 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.429920 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.429936 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430095 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430050 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430111 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430220 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430295 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430338 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430023 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430342 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430218 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430309 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430454 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430382 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430495 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.430523 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.435315 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.435530 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.435676 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.435758 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.435981 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.436259 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.436533 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.436791 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437110 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437257 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437361 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437689 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437715 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437790 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.437889 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.438041 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.438265 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:21 crc kubenswrapper[3552]: E0320 15:26:21.438433 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.529670 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:21 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:21 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:21 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:21 crc kubenswrapper[3552]: I0320 15:26:21.529812 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429812 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429880 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429900 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429963 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429981 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430011 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429923 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429914 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.429968 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.430140 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430178 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430214 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430225 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430233 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430297 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430272 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430308 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430185 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430459 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430489 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.430491 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430541 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.430655 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430720 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.430794 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.430843 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431029 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.431047 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431205 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431350 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431487 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.431538 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431621 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.431670 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.431675 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431787 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.431907 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432015 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432094 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.432130 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.432185 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432245 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432322 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432426 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432496 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.432535 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432582 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432641 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432718 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432854 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.432909 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.433113 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.433150 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.433188 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.433328 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.433358 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:22 crc kubenswrapper[3552]: E0320 15:26:22.433480 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.529930 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:22 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:22 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:22 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:22 crc kubenswrapper[3552]: I0320 15:26:22.530018 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430576 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.431114 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430644 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430656 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.431596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430739 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.431820 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430802 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430822 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.431987 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430855 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432109 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430880 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432258 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430898 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430938 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432365 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430939 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.430974 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.431020 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.431041 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432571 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432643 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.431075 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432762 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.432868 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433002 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433153 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433283 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433437 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433601 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433733 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:23 crc kubenswrapper[3552]: E0320 15:26:23.433853 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.531063 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:23 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:23 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:23 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:23 crc kubenswrapper[3552]: I0320 15:26:23.531157 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.430102 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.430118 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.430990 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.431444 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.431527 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.431618 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.431644 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.431706 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.431725 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.431795 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.431820 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.431878 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.431894 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.431949 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.431971 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432042 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432062 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432120 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432138 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432195 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432219 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432276 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432298 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432355 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432388 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432435 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432477 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432505 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432530 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432553 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432592 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432646 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432669 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432711 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432735 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432767 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432789 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432840 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432868 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432892 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432921 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.432935 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.432983 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433003 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.433033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.433062 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433066 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433164 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433304 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433372 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433553 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433611 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433701 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433764 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.433852 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.437650 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:24 crc kubenswrapper[3552]: E0320 15:26:24.438270 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.531138 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:24 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:24 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:24 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:24 crc kubenswrapper[3552]: I0320 15:26:24.532006 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430353 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430458 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430477 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430580 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430624 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430729 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430739 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.430980 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.431030 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.431003 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.431128 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.431347 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.431422 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.431478 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.431494 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.431627 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.431636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.431760 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.432025 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.432133 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.432385 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.432518 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.432632 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.432759 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.433010 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.433234 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.433722 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.433829 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.433902 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.434162 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.437498 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.437818 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:25 crc kubenswrapper[3552]: E0320 15:26:25.438130 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.531099 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:25 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:25 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:25 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:25 crc kubenswrapper[3552]: I0320 15:26:25.531282 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.157779 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.158211 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.158508 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.158385811 +0000 UTC m=+149.852082681 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.161139 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.161391 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.161579 3552 configmap.go:199] Couldn't get configMap openshift-console/trusted-ca-bundle: object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.161649 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.161669 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.1616444 +0000 UTC m=+149.855341440 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.161765 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.161788 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.161914 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.161882526 +0000 UTC m=+149.855579366 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"kube-rbac-proxy" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.161508 3552 configmap.go:199] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.161974 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162007 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-session: object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.162036 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162079 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images podName:120b38dc-8236-4fa6-a452-642b8ad738ee nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.162065791 +0000 UTC m=+149.855762871 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images") pod "machine-config-operator-76788bff89-wkjgm" (UID: "120b38dc-8236-4fa6-a452-642b8ad738ee") : object "openshift-machine-config-operator"/"machine-config-operator-images" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162108 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.162095582 +0000 UTC m=+149.855792652 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-session" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162108 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/audit-1: object "openshift-apiserver"/"audit-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162157 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.162148873 +0000 UTC m=+149.855845713 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"audit-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162164 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/image-import-ca: object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.162259 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.162231176 +0000 UTC m=+149.855928156 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"image-import-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.263687 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.263785 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.264188 3552 projected.go:294] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.264279 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7: object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.264475 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.264366232 +0000 UTC m=+149.958063202 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.264847 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. 
No retries permitted until 2026-03-20 15:27:30.264824635 +0000 UTC m=+149.958521495 (durationBeforeRetry 1m4s). Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.265376 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.265556 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.265702 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.265676418 +0000 UTC m=+149.959373248 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"service-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.266042 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.266166 3552 configmap.go:199] Couldn't get configMap openshift-console/oauth-serving-cert: object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.266392 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.266361567 +0000 UTC m=+149.960058517 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"oauth-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.266822 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.266922 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.266978 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267023 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267075 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267091 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267119 3552 projected.go:294] Couldn't get configMap openshift-service-ca/kube-root-ca.crt: object "openshift-service-ca"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267130 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267146 3552 projected.go:294] Couldn't get configMap openshift-service-ca/openshift-service-ca.crt: object "openshift-service-ca"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267162 3552 projected.go:200] Error preparing data for 
projected volume kube-api-access-pvn6z for pod openshift-service-ca/service-ca-cd974775-4nsv5: [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267189 3552 configmap.go:199] Couldn't get configMap openshift-console/service-ca: object "openshift-console"/"service-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267162 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267143838 +0000 UTC m=+149.960840698 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-idp-0-file-data" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267236 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267239 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.26722423 +0000 UTC m=+149.960921250 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"service-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267283 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267298 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/serving-cert: object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267319 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267369 3552 configmap.go:199] Couldn't get configMap openshift-console-operator/console-operator-config: object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267374 3552 configmap.go:199] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267461 3552 configmap.go:199] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267374 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267284062 +0000 UTC m=+149.960980932 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pvn6z" (UniqueName: "kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : [object "openshift-service-ca"/"kube-root-ca.crt" not registered, object "openshift-service-ca"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267598 3552 secret.go:194] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267594 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267630 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.26760744 +0000 UTC m=+149.961304300 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.267662 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267743 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267734944 +0000 UTC m=+149.961431774 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-error" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267759 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267753484 +0000 UTC m=+149.961450314 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"console-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267761 3552 configmap.go:199] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267769 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267764415 +0000 UTC m=+149.961461245 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267856 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267838827 +0000 UTC m=+149.961535697 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267897 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267882828 +0000 UTC m=+149.961579688 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-operator-metrics" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.267933 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.267919549 +0000 UTC m=+149.961616419 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369540 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369577 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369614 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369649 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369680 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369711 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.369733 3552 secret.go:194] Couldn't get secret openshift-console/console-serving-cert: object "openshift-console"/"console-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.369820 3552 
configmap.go:199] Couldn't get configMap openshift-route-controller-manager/config: object "openshift-route-controller-manager"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.369860 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.36983094 +0000 UTC m=+150.063527800 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.369745 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.369962 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.369925862 +0000 UTC m=+150.063622822 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.369971 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/kube-root-ca.crt: object "openshift-ingress-canary"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370011 3552 secret.go:194] Couldn't get secret openshift-apiserver/serving-cert: object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370100 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370013 3552 projected.go:294] Couldn't get configMap openshift-ingress-canary/openshift-service-ca.crt: object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370132 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370149 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hjlhw for pod openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 
15:26:26.370157 3552 secret.go:194] Couldn't get secret openshift-route-controller-manager/serving-cert: object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370177 3552 secret.go:194] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370111 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370083027 +0000 UTC m=+150.063780007 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.370171 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370311 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370277252 +0000 UTC m=+150.063974192 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-hjlhw" (UniqueName: "kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370358 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370335864 +0000 UTC m=+150.064032764 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370149 3552 projected.go:200] Error preparing data for projected volume kube-api-access-dt5cx for pod openshift-ingress-canary/ingress-canary-2vhcn: [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370398 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert podName:ed024e5d-8fc2-4c22-803d-73f3c9795f19 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370379305 +0000 UTC m=+150.064076305 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert") pod "kube-apiserver-operator-78d54458c4-sc8h7" (UID: "ed024e5d-8fc2-4c22-803d-73f3c9795f19") : object "openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.369966 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-router-certs: object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370560 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx podName:0b5d722a-1123-4935-9740-52a08d018bc9 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370528609 +0000 UTC m=+150.064225509 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-dt5cx" (UniqueName: "kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx") pod "ingress-canary-2vhcn" (UID: "0b5d722a-1123-4935-9740-52a08d018bc9") : [object "openshift-ingress-canary"/"kube-root-ca.crt" not registered, object "openshift-ingress-canary"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370620 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370593441 +0000 UTC m=+150.064290371 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-router-certs" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-router-certs" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.370567 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370668 3552 configmap.go:199] Couldn't get configMap openshift-machine-api/machine-api-operator-images: object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.370742 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370748 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370722634 +0000 UTC m=+150.064419494 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-images" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.370834 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370881 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370915 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.370927 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370938 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tf29r for pod 
openshift-marketplace/redhat-marketplace-8s8pc: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.370947 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371111 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371125 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ncrf5 for pod openshift-marketplace/certified-operators-7287f: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371134 3552 secret.go:194] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371005 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r podName:c782cf62-a827-4677-b3c2-6f82c5f09cbb nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.370985391 +0000 UTC m=+150.064682361 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tf29r" (UniqueName: "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r") pod "redhat-marketplace-8s8pc" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371064 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371325 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371325 3552 projected.go:294] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371364 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371361 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kp86 for pod openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg: [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371422 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371457 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371482 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86 podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.371457384 +0000 UTC m=+150.065154384 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kp86" (UniqueName: "kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : [object "openshift-cluster-samples-operator"/"kube-root-ca.crt" not registered, object "openshift-cluster-samples-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371528 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/kube-root-ca.crt: object "openshift-ingress-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371543 3552 projected.go:294] Couldn't get configMap openshift-ingress-operator/openshift-service-ca.crt: object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371553 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tl5kg for pod openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t: [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371556 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371591 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.371580087 +0000 UTC m=+150.065276927 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tl5kg" (UniqueName: "kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : [object "openshift-ingress-operator"/"kube-root-ca.crt" not registered, object "openshift-ingress-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371625 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371659 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371684 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371739 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371753 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.371731812 +0000 UTC m=+150.065428842 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-service-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371795 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.371771343 +0000 UTC m=+150.065468403 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371810 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/kube-root-ca.crt: object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371825 3552 projected.go:294] Couldn't get configMap openshift-service-ca-operator/openshift-service-ca.crt: object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371692 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371848 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.371838014 +0000 UTC m=+150.065534854 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371908 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.371910 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371925 3552 projected.go:294] Couldn't get configMap openshift-oauth-apiserver/openshift-service-ca.crt: object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371937 3552 projected.go:200] Error preparing data for projected volume kube-api-access-4w8wh for pod openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd: [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371967 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. 
No retries permitted until 2026-03-20 15:27:30.371959828 +0000 UTC m=+150.065656668 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-4w8wh" (UniqueName: "kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : [object "openshift-oauth-apiserver"/"kube-root-ca.crt" not registered, object "openshift-oauth-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.372013 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372065 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372079 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372088 3552 projected.go:200] Error preparing data for projected volume kube-api-access-76gl8 for pod openshift-network-diagnostics/network-check-target-v54bt: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.372094 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372119 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8 podName:34a48baf-1bee-4921-8bb2-9b7320e76f79 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372106102 +0000 UTC m=+150.065802942 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-76gl8" (UniqueName: "kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8") pod "network-check-target-v54bt" (UID: "34a48baf-1bee-4921-8bb2-9b7320e76f79") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372165 3552 secret.go:194] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.372169 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372192 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372184584 +0000 UTC m=+150.065881424 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372234 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372265 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372257756 +0000 UTC m=+150.065954596 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.372297 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.372330 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372351 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372426 3552 secret.go:194] Couldn't get secret openshift-dns/dns-default-metrics-tls: object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372460 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372437621 +0000 UTC m=+150.066134631 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372483 3552 configmap.go:199] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372497 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372480032 +0000 UTC m=+150.066177072 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default-metrics-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.371834 3552 projected.go:200] Error preparing data for projected volume kube-api-access-d9vhj for pod openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz: [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372519 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372510143 +0000 UTC m=+150.066206983 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : object "openshift-marketplace"/"marketplace-trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372572 3552 secret.go:194] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372586 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372565614 +0000 UTC m=+150.066262634 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-d9vhj" (UniqueName: "kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : [object "openshift-service-ca-operator"/"kube-root-ca.crt" not registered, object "openshift-service-ca-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372627 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls podName:f728c15e-d8de-4a9a-a3ea-fdcead95cb91 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372608425 +0000 UTC m=+150.066305485 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls") pod "cluster-samples-operator-bc474d5d6-wshwg" (UID: "f728c15e-d8de-4a9a-a3ea-fdcead95cb91") : object "openshift-cluster-samples-operator"/"samples-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372631 3552 secret.go:194] Couldn't get secret openshift-config-operator/config-operator-serving-cert: object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372664 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372656477 +0000 UTC m=+150.066353317 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : object "openshift-config-operator"/"config-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372017 3552 secret.go:194] Couldn't get secret openshift-authentication-operator/serving-cert: object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.372364 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372748 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372735029 +0000 UTC m=+150.066432079 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372746 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/config: object "openshift-controller-manager"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372799 3552 configmap.go:199] Couldn't get configMap openshift-service-ca/signing-cabundle: object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372809 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.37279351 +0000 UTC m=+150.066490560 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372861 3552 configmap.go:199] Couldn't get configMap openshift-dns/dns-default: object "openshift-dns"/"dns-default" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.372960 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume podName:13045510-8717-4a71-ade4-be95a76440a7 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.372935324 +0000 UTC m=+150.066632284 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume") pod "dns-default-gbw49" (UID: "13045510-8717-4a71-ade4-be95a76440a7") : object "openshift-dns"/"dns-default" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373090 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373075098 +0000 UTC m=+150.066771938 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-cabundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373108 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373116 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5 podName:887d596e-c519-4bfa-af90-3edd9e1b2f0f nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373107449 +0000 UTC m=+150.066804289 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ncrf5" (UniqueName: "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5") pod "certified-operators-7287f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373073 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373188 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373289 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373297 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/kube-root-ca.crt: object "openshift-etcd-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373317 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373333 3552 projected.go:200] Error preparing data for projected volume kube-api-access-55f7t for pod openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373340 3552 projected.go:294] Couldn't get configMap openshift-etcd-operator/openshift-service-ca.crt: object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373364 3552 projected.go:200] Error preparing data for projected volume kube-api-access-9724w for pod openshift-etcd-operator/etcd-operator-768d5b5d86-722mg: [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373374 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373360066 +0000 UTC m=+150.067056906 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-55f7t" (UniqueName: "kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373309 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373442 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373481 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373457088 +0000 UTC m=+150.067153958 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-9724w" (UniqueName: "kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : [object "openshift-etcd-operator"/"kube-root-ca.crt" not registered, object "openshift-etcd-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373524 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls podName:297ab9b6-2186-4d5b-a952-2bfd59af63c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.37350169 +0000 UTC m=+150.067198650 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls") pod "machine-config-controller-6df6df6b6b-58shh" (UID: "297ab9b6-2186-4d5b-a952-2bfd59af63c4") : object "openshift-machine-config-operator"/"mcc-proxy-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373541 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373563 3552 projected.go:200] Error preparing data for projected volume kube-api-access-ptdrb for pod openshift-marketplace/redhat-operators-f4jkp: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373601 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373629 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb podName:4092a9f8-5acc-4932-9e90-ef962eeb301a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373609673 +0000 UTC m=+150.067306713 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-ptdrb" (UniqueName: "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb") pod "redhat-operators-f4jkp" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373703 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373708 3552 secret.go:194] Couldn't get secret openshift-controller-manager/serving-cert: object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373767 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373752646 +0000 UTC m=+150.067449646 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373780 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373816 3552 secret.go:194] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373851 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.373870 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373878 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373861349 +0000 UTC m=+150.067558379 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : object "openshift-machine-api"/"control-plane-machine-set-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373944 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.373921361 +0000 UTC m=+150.067618381 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373967 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.373988 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374002 3552 projected.go:200] Error preparing data for projected volume kube-api-access-n6sqt for pod openshift-marketplace/community-operators-8jhz6: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374007 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374045 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt podName:3f4dca86-e6ee-4ec9-8324-86aff960225e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374032454 +0000 UTC m=+150.067729294 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-n6sqt" (UniqueName: "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt") pod "community-operators-8jhz6" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374104 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374128 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-client: object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374146 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374222 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374261 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.37423879 +0000 UTC m=+150.067935810 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-client" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374270 3552 secret.go:194] Couldn't get secret openshift-image-registry/installation-pull-secrets: object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374266 3552 secret.go:194] Couldn't get secret openshift-console/console-oauth-config: object "openshift-console"/"console-oauth-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374303 3552 secret.go:194] Couldn't get secret openshift-ingress-operator/metrics-tls: object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374310 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374341 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374320592 +0000 UTC m=+150.068017612 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "installation-pull-secrets" (UniqueName: "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"installation-pull-secrets" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374390 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374424 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374391 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374371923 +0000 UTC m=+150.068068993 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"metrics-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374452 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374468 3552 projected.go:200] Error preparing data for projected volume kube-api-access-5rpl7 for pod openshift-console-operator/console-operator-5dbbc74dc9-cp5cd: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374474 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374453125 +0000 UTC m=+150.068150205 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-oauth-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374511 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: object "openshift-authentication-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374538 3552 projected.go:294] Couldn't get configMap openshift-authentication-operator/openshift-service-ca.crt: object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374555 3552 projected.go:200] Error preparing data for projected volume kube-api-access-j7zrh for pod openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8: [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374517 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7 podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374500767 +0000 UTC m=+150.068197807 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-5rpl7" (UniqueName: "kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374646 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374664 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374639051 +0000 UTC m=+150.068335921 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-j7zrh" (UniqueName: "kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : [object "openshift-authentication-operator"/"kube-root-ca.crt" not registered, object "openshift-authentication-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374716 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374739 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374758 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.374744303 +0000 UTC m=+150.068441323 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"encryption-config-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374900 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374924 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374967 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.374981 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.374988 3552 projected.go:200] Error preparing data for projected volume kube-api-access-wrd8h for pod openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375010 3552 secret.go:194] Couldn't get secret openshift-machine-api/machine-api-operator-tls: object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375056 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls podName:4f8aa612-9da0-4a2b-911e-6a1764a4e74e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375042021 +0000 UTC m=+150.068739042 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls") pod "machine-api-operator-788b7c6b6c-ctdmb" (UID: "4f8aa612-9da0-4a2b-911e-6a1764a4e74e") : object "openshift-machine-api"/"machine-api-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375089 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375075392 +0000 UTC m=+150.068772472 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-wrd8h" (UniqueName: "kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375059 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375156 3552 projected.go:294] Couldn't get configMap openshift-kube-scheduler-operator/kube-root-ca.crt: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375178 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7: object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375179 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375215 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375230 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access podName:71af81a9-7d43-49b2-9287-c375900aa905 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375214316 +0000 UTC m=+150.068911286 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access") pod "openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" (UID: "71af81a9-7d43-49b2-9287-c375900aa905") : object "openshift-kube-scheduler-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375237 3552 projected.go:200] Error preparing data for projected volume kube-api-access-2zpsk for pod openshift-console/downloads-65476884b9-9wcvx: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375245 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375327 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375366 3552 configmap.go:199] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375388 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375430 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config podName:ebf09b15-4bb1-44bf-9d54-e76fad5cf76e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375391671 +0000 UTC m=+150.069088511 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config") pod "authentication-operator-7cc7ff75d5-g9qv8" (UID: "ebf09b15-4bb1-44bf-9d54-e76fad5cf76e") : object "openshift-authentication-operator"/"authentication-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375435 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375529 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375535 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk podName:6268b7fe-8910-4505-b404-6f1df638105c nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375511334 +0000 UTC m=+150.069208374 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-2zpsk" (UniqueName: "kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk") pod "downloads-65476884b9-9wcvx" (UID: "6268b7fe-8910-4505-b404-6f1df638105c") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375584 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375561416 +0000 UTC m=+150.069258436 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"trusted-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375650 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375714 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375701269 +0000 UTC m=+150.069398119 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-ocp-branding-template" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375722 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375803 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375924 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375941 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.375914265 +0000 UTC m=+150.069611135 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375960 3552 projected.go:294] Couldn't get configMap openshift-route-controller-manager/openshift-service-ca.crt: object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.375804 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375981 3552 projected.go:200] Error preparing data for projected volume kube-api-access-g9kp4 for pod openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx: [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376041 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:26 crc 
kubenswrapper[3552]: E0320 15:26:26.376050 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4 podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376028788 +0000 UTC m=+150.069725788 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-g9kp4" (UniqueName: "kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : [object "openshift-route-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-route-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.375838 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/config: object "openshift-apiserver"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376116 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376119 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376172 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376145501 +0000 UTC m=+150.069842381 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376211 3552 configmap.go:199] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376215 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376192833 +0000 UTC m=+150.069889893 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : object "openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376276 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376257705 +0000 UTC m=+150.069954575 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376295 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376373 3552 configmap.go:199] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376384 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376471 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.37645109 +0000 UTC m=+150.070148000 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"service-ca-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376539 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376557 3552 secret.go:194] Couldn't get secret openshift-apiserver/etcd-client: object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376657 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376606064 +0000 UTC m=+150.070302954 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-client" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376661 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376757 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376827 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376806199 +0000 UTC m=+150.070503169 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-login" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376763 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376851 3552 secret.go:194] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376917 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.376921 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.376900362 +0000 UTC m=+150.070597282 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-user-template-provider-selection" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.376985 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377054 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377059 3552 projected.go:294] Couldn't get configMap openshift-console/kube-root-ca.crt: object "openshift-console"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377096 3552 projected.go:294] Couldn't get configMap openshift-console/openshift-service-ca.crt: object "openshift-console"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377118 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nkbdt for pod openshift-console/console-8568c59db8-fspjn: [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377133 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377184 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.377163349 +0000 UTC m=+150.070860359 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-nkbdt" (UniqueName: "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : [object "openshift-console"/"kube-root-ca.crt" not registered, object "openshift-console"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377234 3552 configmap.go:199] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377247 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377299 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.377278912 +0000 UTC m=+150.070975812 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"v4-0-config-system-cliconfig" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377361 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377477 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377495 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: object "openshift-etcd-operator"/"etcd-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377551 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377565 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca 
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377620 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377680 3552 projected.go:294] Couldn't get configMap openshift-authentication/kube-root-ca.crt: object "openshift-authentication"/"kube-root-ca.crt" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377696 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377712 3552 projected.go:294] Couldn't get configMap openshift-authentication/openshift-service-ca.crt: object "openshift-authentication"/"openshift-service-ca.crt" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377734 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vrvxn for pod openshift-authentication/oauth-openshift-6499cf79cf-qdfbh: [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377765 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377797 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.377777416 +0000 UTC m=+150.071474366 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrvxn" (UniqueName: "kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : [object "openshift-authentication"/"kube-root-ca.crt" not registered, object "openshift-authentication"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377864 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.377936 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.378009 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378025 3552 secret.go:194] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.378079 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378095 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs podName:a702c6d2-4dde-4077-ab8c-0f8df804bf7a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378071684 +0000 UTC m=+150.071768624 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs") pod "network-metrics-daemon-qdfr4" (UID: "a702c6d2-4dde-4077-ab8c-0f8df804bf7a") : object "openshift-multus"/"metrics-daemon-secret" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377366 3552 secret.go:194] Couldn't get secret openshift-multus/multus-admission-controller-secret: object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.378151 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378171 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs podName:d5025cb4-ddb0-4107-88c1-bcbcdb779ac0 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378151056 +0000 UTC m=+150.071848006 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs") pod "multus-admission-controller-6c7c885997-4hbbc" (UID: "d5025cb4-ddb0-4107-88c1-bcbcdb779ac0") : object "openshift-multus"/"multus-admission-controller-secret" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.378233 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378250 3552 configmap.go:199] Couldn't get configMap openshift-ingress-operator/trusted-ca: object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378316 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca podName:7d51f445-054a-4e4f-a67b-a828f5a32511 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.3782961 +0000 UTC m=+150.071992980 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca") pod "ingress-operator-7d46d5bb6d-rrg6t" (UID: "7d51f445-054a-4e4f-a67b-a828f5a32511") : object "openshift-ingress-operator"/"trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.378317 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378463 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378535 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator-operator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378558 3552 projected.go:200] Error preparing data for projected volume kube-api-access-6kgvs for pod openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr: [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378583 3552 secret.go:194] Couldn't get secret openshift-service-ca/signing-key: object "openshift-service-ca"/"signing-key" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378628 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378607518 +0000 UTC m=+150.072304488 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-6kgvs" (UniqueName: "kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : [object "openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378665 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key podName:6639609b-906b-4193-883e-ed1160aa5d50 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.37864696 +0000 UTC m=+150.072343880 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key") pod "service-ca-cd974775-4nsv5" (UID: "6639609b-906b-4193-883e-ed1160aa5d50") : object "openshift-service-ca"/"signing-key" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378721 3552 secret.go:194] Couldn't get secret openshift-console-operator/serving-cert: object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378751 3552 configmap.go:199] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378782 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert podName:e9127708-ccfd-4891-8a3a-f0cacb77e0f4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378761213 +0000 UTC m=+150.072458113 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert") pod "console-operator-5dbbc74dc9-cp5cd" (UID: "e9127708-ccfd-4891-8a3a-f0cacb77e0f4") : object "openshift-console-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378818 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378800354 +0000 UTC m=+150.072497284 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377867 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/client-ca: object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378889 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378868166 +0000 UTC m=+150.072565196 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"client-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378894 3552 projected.go:294] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378927 3552 projected.go:200] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb: object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378980 3552 secret.go:194] Couldn't get secret openshift-apiserver/encryption-config-1: object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.378986 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access podName:c1620f19-8aa3-45cf-931b-7ae0e5cd14cf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.378968078 +0000 UTC m=+150.072664988 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access") pod "kube-controller-manager-operator-6f6cb54958-rbddb" (UID: "c1620f19-8aa3-45cf-931b-7ae0e5cd14cf") : object "openshift-kube-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379060 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379062 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.37904204 +0000 UTC m=+150.072738970 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"encryption-config-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379131 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379111562 +0000 UTC m=+150.072808622 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.377938 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379165 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379195 3552 projected.go:294] Couldn't get configMap openshift-kube-storage-version-migrator/openshift-service-ca.crt: object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379219 3552 projected.go:200] Error preparing data for projected volume kube-api-access-hqmhq for pod openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv: [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379202 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379181494 +0000 UTC m=+150.072878434 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-service-ca-bundle" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379286 3552 secret.go:194] Couldn't get secret openshift-dns-operator/metrics-tls: object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379297 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq podName:cf1a8966-f594-490a-9fbb-eec5bafd13d3 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379277747 +0000 UTC m=+150.072974657 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-hqmhq" (UniqueName: "kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq") pod "migrator-f7c6d88df-q2fnv" (UID: "cf1a8966-f594-490a-9fbb-eec5bafd13d3") : [object "openshift-kube-storage-version-migrator"/"kube-root-ca.crt" not registered, object "openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379345 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379327478 +0000 UTC m=+150.073024418 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : object "openshift-dns-operator"/"metrics-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379392 3552 secret.go:194] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379474 3552 secret.go:194] Couldn't get secret openshift-service-ca-operator/serving-cert: object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379488 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379468452 +0000 UTC m=+150.073165352 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : object "openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.378497 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379543 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert podName:6d67253e-2acd-4bc1-8185-793587da4f17 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379520913 +0000 UTC m=+150.073217833 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert") pod "service-ca-operator-546b4f8984-pwccz" (UID: "6d67253e-2acd-4bc1-8185-793587da4f17") : object "openshift-service-ca-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.379613 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.379688 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.379763 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379824 3552 projected.go:294] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: object "openshift-machine-api"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379861 3552 projected.go:294] Couldn't get configMap openshift-machine-api/openshift-service-ca.crt: object "openshift-machine-api"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379881 3552 projected.go:200] Error preparing data for projected volume kube-api-access-bm986 for pod openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw: [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379935 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379950 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986 podName:45a8038e-e7f2-4d93-a6f5-7753aa54e63f nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.379929544 +0000 UTC m=+150.073626414 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-bm986" (UniqueName: "kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986") pod "control-plane-machine-set-operator-649bd778b4-tt5tw" (UID: "45a8038e-e7f2-4d93-a6f5-7753aa54e63f") : [object "openshift-machine-api"/"kube-root-ca.crt" not registered, object "openshift-machine-api"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379966 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.379835 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.379990 3552 projected.go:200] Error preparing data for projected volume kube-api-access-tvc4r for pod openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.380055 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380060 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.380036697 +0000 UTC m=+150.073733567 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-tvc4r" (UniqueName: "kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380148 3552 secret.go:194] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380214 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert podName:9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.380193702 +0000 UTC m=+150.073890602 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert") pod "kube-storage-version-migrator-operator-686c6c748c-qbnnr" (UID: "9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7") : object "openshift-kube-storage-version-migrator-operator"/"serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380239 3552 secret.go:194] Couldn't get secret openshift-image-registry/image-registry-operator-tls: object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380307 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.380286904 +0000 UTC m=+150.073983854 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"image-registry-operator-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380318 3552 projected.go:269] Couldn't get secret openshift-image-registry/image-registry-tls: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380340 3552 projected.go:200] Error preparing data for projected volume registry-tls for pod openshift-image-registry/image-registry-6fbd648f87-j4bk5: object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380399 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls podName:5cad292d-912c-4787-a5fa-0ade98e731eb nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.380378857 +0000 UTC m=+150.074075957 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "registry-tls" (UniqueName: "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : object "openshift-image-registry"/"image-registry-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380398 3552 configmap.go:199] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.380152 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380516 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. 
No retries permitted until 2026-03-20 15:27:30.38049546 +0000 UTC m=+150.074192370 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.380616 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.380699 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380801 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380837 3552 projected.go:294] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380856 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lx2h9 for pod openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m: [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380930 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9 podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.380909171 +0000 UTC m=+150.074606031 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lx2h9" (UniqueName: "kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : [object "openshift-apiserver-operator"/"kube-root-ca.crt" not registered, object "openshift-apiserver-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.380860 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.380975 3552 projected.go:294] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: object "openshift-console-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381010 3552 projected.go:294] Couldn't get configMap openshift-console-operator/openshift-service-ca.crt: object "openshift-console-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381032 3552 projected.go:200] Error preparing data for projected volume kube-api-access-fqnmc for pod openshift-console-operator/console-conversion-webhook-595f9969b-l6z49: [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381103 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.381082066 +0000 UTC m=+150.074778936 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-fqnmc" (UniqueName: "kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : [object "openshift-console-operator"/"kube-root-ca.crt" not registered, object "openshift-console-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381107 3552 configmap.go:199] Couldn't get configMap openshift-authentication/audit: object "openshift-authentication"/"audit" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381180 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies podName:b61ce6b0-a70f-42b7-9435-3d6acba81ccf nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.381159498 +0000 UTC m=+150.074856518 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies") pod "oauth-openshift-6499cf79cf-qdfbh" (UID: "b61ce6b0-a70f-42b7-9435-3d6acba81ccf") : object "openshift-authentication"/"audit" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381032 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381201 3552 configmap.go:199] Couldn't get configMap openshift-apiserver/etcd-serving-ca: object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381266 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.38124626 +0000 UTC m=+150.074943260 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : object "openshift-apiserver"/"etcd-serving-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381267 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381360 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381461 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381468 3552 secret.go:194] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: object "openshift-machine-config-operator"/"mco-proxy-tls" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381541 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:26 
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381613 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381670 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381703 3552 projected.go:294] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381723 3552 projected.go:200] Error preparing data for projected volume kube-api-access-qcxcp for pod openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381767 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381790 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp podName:d0f40333-c860-4c04-8058-a0bf572dcf12 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.381768944 +0000 UTC m=+150.075465964 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-qcxcp" (UniqueName: "kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp") pod "network-check-source-5c5478f8c-vqvt7" (UID: "d0f40333-c860-4c04-8058-a0bf572dcf12") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381678 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381837 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert podName:c085412c-b875-46c9-ae3e-e6b0d8067091 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.381812606 +0000 UTC m=+150.075509476 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert") pod "olm-operator-6d8474f75f-x54mh" (UID: "c085412c-b875-46c9-ae3e-e6b0d8067091") : object "openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381928 3552 configmap.go:199] Couldn't get configMap openshift-controller-manager/openshift-global-ca: object "openshift-controller-manager"/"openshift-global-ca" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.381978 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.381994 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.38197502 +0000 UTC m=+150.075671910 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered
Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : object "openshift-controller-manager"/"openshift-global-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382068 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382083 3552 configmap.go:199] Couldn't get configMap openshift-route-controller-manager/client-ca: object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382147 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca podName:d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382126884 +0000 UTC m=+150.075823824 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca") pod "route-controller-manager-6f75dd68cc-gcdzx" (UID: "d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2") : object "openshift-route-controller-manager"/"client-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382149 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382236 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382261 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/kube-root-ca.crt: object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382293 3552 projected.go:294] Couldn't get configMap openshift-controller-manager-operator/openshift-service-ca.crt: object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382308 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382384 3552 reconciler_common.go:231] "operationExecutor.MountVolume 
started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382433 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382507 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382484384 +0000 UTC m=+150.076181254 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382567 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382640 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382667 3552 secret.go:194] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382689 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/audit-1: object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382729 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382739 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert podName:0b5c38ff-1fa8-4219-994d-15776acd4a4d nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.3827177 +0000 UTC m=+150.076414560 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert") pod "etcd-operator-768d5b5d86-722mg" (UID: "0b5c38ff-1fa8-4219-994d-15776acd4a4d") : object "openshift-etcd-operator"/"etcd-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382755 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382781 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382760411 +0000 UTC m=+150.076457391 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"audit-1" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382801 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382816 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382796662 +0000 UTC m=+150.076493672 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382843 3552 secret.go:194] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382859 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert podName:bd556935-a077-45df-ba3f-d42c39326ccd nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382843704 +0000 UTC m=+150.076540664 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert") pod "packageserver-8464bcc55b-sjnqz" (UID: "bd556935-a077-45df-ba3f-d42c39326ccd") : object "openshift-operator-lifecycle-manager"/"packageserver-service-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382864 3552 configmap.go:199] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382572 3552 secret.go:194] Couldn't get secret openshift-console-operator/webhook-serving-cert: object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.382865 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382920 3552 configmap.go:199] Couldn't get configMap openshift-image-registry/trusted-ca: object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382912 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert podName:8a5ae51d-d173-4531-8975-f164c975ce1f nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382891325 +0000 UTC m=+150.076588325 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert") pod "catalog-operator-857456c46-7f5wf" (UID: "8a5ae51d-d173-4531-8975-f164c975ce1f") : object "openshift-operator-lifecycle-manager"/"pprof-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382992 3552 configmap.go:199] Couldn't get configMap openshift-console/console-config: object "openshift-console"/"console-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382996 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.382971057 +0000 UTC m=+150.076668137 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-serving-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383094 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert podName:59748b9b-c309-4712-aa85-bb38d71c4915 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.38307153 +0000 UTC m=+150.076768610 (durationBeforeRetry 1m4s). 
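
Every mount failure above bottoms out in the same root cause: a ConfigMap or Secret the kubelet reports as not registered for the pod's namespace. Tallying those objects shows at a glance whether one object is missing or, as here, the condition is cluster-wide. A minimal sketch, again assuming the exact quoting used in these lines (names are illustrative):

    # Tally which namespace/object pairs the kubelet reports as
    # "not registered".
    import re
    from collections import Counter

    NOT_REGISTERED_RE = re.compile(r'object "([^"]+)"/"([^"]+)" not registered')

    def unregistered_objects(log_lines):
        counts = Counter()
        for line in log_lines:
            for ns, name in NOT_REGISTERED_RE.findall(line):
                counts[(ns, name)] += 1
        return counts
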
Error: MountVolume.SetUp failed for volume "webhook-serving-cert" (UniqueName: "kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert") pod "console-conversion-webhook-595f9969b-l6z49" (UID: "59748b9b-c309-4712-aa85-bb38d71c4915") : object "openshift-console-operator"/"webhook-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.382315 3552 projected.go:200] Error preparing data for projected volume kube-api-access-l8bxr for pod openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z: [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.383159 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383161 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr podName:0f394926-bdb9-425c-b36e-264d7fd34550 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.383148372 +0000 UTC m=+150.076845212 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-l8bxr" (UniqueName: "kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr") pod "openshift-controller-manager-operator-7978d7d7f6-2nt8z" (UID: "0f394926-bdb9-425c-b36e-264d7fd34550") : [object "openshift-controller-manager-operator"/"kube-root-ca.crt" not registered, object "openshift-controller-manager-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383226 3552 secret.go:194] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383261 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert podName:43ae1c37-047b-4ee2-9fee-41e337dd4ac8 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.383251595 +0000 UTC m=+150.076948435 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert") pod "openshift-apiserver-operator-7c88c4c865-kn67m" (UID: "43ae1c37-047b-4ee2-9fee-41e337dd4ac8") : object "openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.383386 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383444 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config podName:db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.383382008 +0000 UTC m=+150.077079018 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config") pod "console-8568c59db8-fspjn" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4") : object "openshift-console"/"console-config" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383468 3552 secret.go:194] Couldn't get secret openshift-oauth-apiserver/etcd-client: object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383493 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca podName:b54e8941-2fc4-432a-9e51-39684df9089e nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.383471401 +0000 UTC m=+150.077168401 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca") pod "cluster-image-registry-operator-7769bd8d7d-q5cvv" (UID: "b54e8941-2fc4-432a-9e51-39684df9089e") : object "openshift-image-registry"/"trusted-ca" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.383652 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client podName:5bacb25d-97b6-4491-8fb4-99feae1d802a nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.383640185 +0000 UTC m=+150.077337025 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client") pod "apiserver-69c565c9b6-vbdpd" (UID: "5bacb25d-97b6-4491-8fb4-99feae1d802a") : object "openshift-oauth-apiserver"/"etcd-client" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430286 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.430503 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
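
The "Error syncing pod, skipping" entries that follow all carry one cause: NetworkReady=false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/, so the kubelet refuses to sync any pod that needs pod networking. A sketch that collects the affected pods and their podUIDs from lines like these (same format assumptions as before; the helper name is illustrative):

    # Map pod -> podUID for pods whose sync is skipped because the CNI
    # plugin is not ready.
    import re

    CNI_SKIP_RE = re.compile(
        r'"Error syncing pod, skipping" err="network is not ready.*?'
        r'pod="(?P<pod>[^"]+)" podUID="(?P<uid>[^"]+)"'
    )

    def pods_blocked_on_cni(log_text):
        return {m.group("pod"): m.group("uid")
                for m in CNI_SKIP_RE.finditer(log_text)}
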
Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430582 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430676 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430716 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.430725 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.430783 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430804 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430833 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.430875 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.431753 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.431755 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.431749 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.431850 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.431955 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.432060 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432124 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432190 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.432290 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432491 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.432584 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432637 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432640 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432704 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.432801 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432854 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.432918 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.433008 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.433044 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.433207 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.433453 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.433597 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.433670 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.433746 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.433884 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.433955 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.434049 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.434255 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434444 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434482 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.434575 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.434658 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.434741 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434762 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434775 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434795 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434802 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.434861 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435047 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435137 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435200 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435292 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435427 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435729 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435800 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435751 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435845 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.435902 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.485143 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.485198 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485357 3552 projected.go:294] Couldn't get configMap openshift-marketplace/kube-root-ca.crt: object "openshift-marketplace"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485382 3552 projected.go:294] Couldn't get configMap openshift-marketplace/openshift-service-ca.crt: object "openshift-marketplace"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485395 3552 projected.go:200] Error preparing data for projected volume kube-api-access-rg2zg for pod openshift-marketplace/marketplace-operator-8b455464d-f9xdt: [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485471 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg podName:3482be94-0cdb-4e2a-889b-e5fac59fdbf5 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.485455404 +0000 UTC m=+150.179152234 (durationBeforeRetry 1m4s). 
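
The failing kube-api-access-* volumes in the entries that follow are projected service-account-token volumes: besides the token itself they project the namespace's kube-root-ca.crt and, on OpenShift, openshift-service-ca.crt ConfigMaps, which is why each projected.go failure lists exactly those two objects as not registered. A sketch that groups the failed projected volumes by namespace (illustrative names, same format assumptions):

    # Group failed kube-api-access projected volumes by namespace.
    import re
    from collections import defaultdict

    PROJECTED_RE = re.compile(
        r'Error preparing data for projected volume (?P<volume>kube-api-access-\S+) '
        r'for pod (?P<namespace>[^/]+)/(?P<pod>\S+):'
    )

    def failed_projected_volumes(log_lines):
        by_namespace = defaultdict(set)
        for line in log_lines:
            for m in PROJECTED_RE.finditer(line):
                by_namespace[m.group("namespace")].add(
                    (m.group("pod"), m.group("volume")))
        return by_namespace
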
Error: MountVolume.SetUp failed for volume "kube-api-access-rg2zg" (UniqueName: "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg") pod "marketplace-operator-8b455464d-f9xdt" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5") : [object "openshift-marketplace"/"kube-root-ca.crt" not registered, object "openshift-marketplace"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485734 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/kube-root-ca.crt: object "hostpath-provisioner"/"kube-root-ca.crt" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485807 3552 projected.go:294] Couldn't get configMap hostpath-provisioner/openshift-service-ca.crt: object "hostpath-provisioner"/"openshift-service-ca.crt" not registered
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.485839 3552 projected.go:200] Error preparing data for projected volume kube-api-access-vvtrv for pod hostpath-provisioner/csi-hostpathplugin-hvm8g: [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.486005 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv podName:12e733dd-0939-4f1b-9cbb-13897e093787 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.485961277 +0000 UTC m=+150.179658137 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-vvtrv" (UniqueName: "kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv") pod "csi-hostpathplugin-hvm8g" (UID: "12e733dd-0939-4f1b-9cbb-13897e093787") : [object "hostpath-provisioner"/"kube-root-ca.crt" not registered, object "hostpath-provisioner"/"openshift-service-ca.crt" not registered]
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.530635 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:26 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:26 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:26 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.530760 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.587927 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.588053 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: 
\"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.588084 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588314 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/kube-root-ca.crt: object "openshift-dns-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588437 3552 projected.go:294] Couldn't get configMap openshift-dns-operator/openshift-service-ca.crt: object "openshift-dns-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588477 3552 projected.go:200] Error preparing data for projected volume kube-api-access-nf4t2 for pod openshift-dns-operator/dns-operator-75f687757b-nz2xb: [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588669 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2 podName:10603adc-d495-423c-9459-4caa405960bb nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.588590768 +0000 UTC m=+150.282287728 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-nf4t2" (UniqueName: "kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2") pod "dns-operator-75f687757b-nz2xb" (UID: "10603adc-d495-423c-9459-4caa405960bb") : [object "openshift-dns-operator"/"kube-root-ca.crt" not registered, object "openshift-dns-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588712 3552 projected.go:294] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: object "openshift-apiserver"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588718 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: object "openshift-controller-manager"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588739 3552 projected.go:294] Couldn't get configMap openshift-apiserver/openshift-service-ca.crt: object "openshift-apiserver"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588758 3552 projected.go:200] Error preparing data for projected volume kube-api-access-sc9fm for pod openshift-apiserver/apiserver-6cdf967d79-ffdf8: [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588759 3552 projected.go:294] Couldn't get configMap openshift-controller-manager/openshift-service-ca.crt: object "openshift-controller-manager"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588779 3552 projected.go:200] Error preparing data for projected volume kube-api-access-czvnk for pod 
openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh: [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.588843 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk podName:d3992789-6f8b-4806-8ce0-261a7623ca46 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.588818454 +0000 UTC m=+150.282515324 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-czvnk" (UniqueName: "kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk") pod "controller-manager-7fdc5fd4dd-zdxlh" (UID: "d3992789-6f8b-4806-8ce0-261a7623ca46") : [object "openshift-controller-manager"/"kube-root-ca.crt" not registered, object "openshift-controller-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.589006 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm podName:bab054c9-6c83-40ee-896d-6459b22a6b4b nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.588989429 +0000 UTC m=+150.282686459 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-sc9fm" (UniqueName: "kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm") pod "apiserver-6cdf967d79-ffdf8" (UID: "bab054c9-6c83-40ee-896d-6459b22a6b4b") : [object "openshift-apiserver"/"kube-root-ca.crt" not registered, object "openshift-apiserver"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.691294 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:26 crc kubenswrapper[3552]: I0320 15:26:26.691383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691515 3552 projected.go:294] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: object "openshift-config-operator"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691561 3552 projected.go:294] Couldn't get configMap openshift-config-operator/openshift-service-ca.crt: object "openshift-config-operator"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691577 3552 projected.go:200] Error preparing data for projected volume kube-api-access-8dcvj for pod openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc: [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc 
kubenswrapper[3552]: E0320 15:26:26.691660 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj podName:530553aa-0a1d-423e-8a22-f5eb4bdbb883 nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.69163914 +0000 UTC m=+150.385336010 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-8dcvj" (UniqueName: "kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj") pod "openshift-config-operator-77658b5b66-dq5sc" (UID: "530553aa-0a1d-423e-8a22-f5eb4bdbb883") : [object "openshift-config-operator"/"kube-root-ca.crt" not registered, object "openshift-config-operator"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691729 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691797 3552 projected.go:294] Couldn't get configMap openshift-operator-lifecycle-manager/openshift-service-ca.crt: object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691826 3552 projected.go:200] Error preparing data for projected volume kube-api-access-x5d97 for pod openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2: [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:26 crc kubenswrapper[3552]: E0320 15:26:26.691975 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97 podName:63eb7413-02c3-4d6e-bb48-e5ffe5ce15be nodeName:}" failed. No retries permitted until 2026-03-20 15:27:30.691919617 +0000 UTC m=+150.385616487 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-x5d97" (UniqueName: "kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97") pod "package-server-manager-84d578d794-jw7r2" (UID: "63eb7413-02c3-4d6e-bb48-e5ffe5ce15be") : [object "openshift-operator-lifecycle-manager"/"kube-root-ca.crt" not registered, object "openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" not registered] Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.430672 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.430753 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.430710 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.430810 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.431113 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.431186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.431357 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.431455 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431590 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.431673 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431810 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431899 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.431906 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431963 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431989 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431974 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.431995 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432100 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.432176 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.432200 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432267 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.432285 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432374 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.432455 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432504 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432558 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432675 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.432739 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432841 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.432906 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.433025 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.433134 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:27 crc kubenswrapper[3552]: E0320 15:26:27.433171 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.529366 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:27 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:27 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:27 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:27 crc kubenswrapper[3552]: I0320 15:26:27.529461 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.429998 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430027 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430138 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430167 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430210 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430335 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.430379 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430580 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430588 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.430693 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430770 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430778 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.430794 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430855 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430811 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430908 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430924 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430866 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430957 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430984 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430809 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431016 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431029 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431041 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431039 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431069 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430779 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.430874 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431116 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431262 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.431773 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.431931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.431961 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.432078 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.432353 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.432688 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.432866 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.433004 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.433139 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.433287 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.433663 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.433843 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.433853 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.434007 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.434149 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.434533 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.434705 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.434950 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435060 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435164 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435296 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435486 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435721 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.435847 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.436005 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:28 crc kubenswrapper[3552]: E0320 15:26:28.436085 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.529688 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:28 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:28 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:28 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.529802 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:28 crc kubenswrapper[3552]: I0320 15:26:28.777265 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" probeResult="failure" output="" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430704 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430801 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430878 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430930 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430904 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.430988 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.431021 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430876 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430894 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.430735 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.431463 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.431551 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.431583 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.431716 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.431945 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.432054 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.432162 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.432197 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.432241 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.432252 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.432071 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.432355 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.431542 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.432545 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.432817 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.433029 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.433221 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.433445 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.433616 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.433760 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.433984 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.434168 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.434358 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:29 crc kubenswrapper[3552]: E0320 15:26:29.434547 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.529255 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:29 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:29 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:29 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:29 crc kubenswrapper[3552]: I0320 15:26:29.529444 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430117 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430199 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430275 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430230 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430287 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430387 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430141 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430459 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430245 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430493 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430250 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430263 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430291 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430305 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430233 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430808 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.430814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431125 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.430879 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.430981 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431037 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431271 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431060 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431356 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431240 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431435 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431466 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431506 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431509 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.431573 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431622 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431729 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431799 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431859 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.431970 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432063 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.432100 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.432165 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432234 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432306 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432382 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.432447 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432534 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.432597 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432689 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432772 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432838 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.432935 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433003 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433067 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433121 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433248 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433322 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433347 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433455 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.433520 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.434581 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:30 crc kubenswrapper[3552]: E0320 15:26:30.434778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.530495 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:30 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:30 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:30 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:30 crc kubenswrapper[3552]: I0320 15:26:30.530706 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.429800 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.429854 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.429967 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.435628 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.435673 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.435824 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.435885 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.435839 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.436037 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436091 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436046 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.436193 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436240 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436273 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436308 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.436350 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.436587 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436603 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.436754 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.436794 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.436919 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.437061 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.437098 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437116 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.437210 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437306 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437495 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437635 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437744 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437838 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.437937 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.438017 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.438285 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:31 crc kubenswrapper[3552]: E0320 15:26:31.438397 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.530482 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:31 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:31 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:31 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:31 crc kubenswrapper[3552]: I0320 15:26:31.530617 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.429601 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.429702 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.429757 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.429864 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.429866 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.429965 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430229 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430255 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.430257 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430464 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430590 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430737 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430768 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.430747 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430874 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.430897 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.430952 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.431055 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431112 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431142 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.431262 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431324 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431356 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.431667 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431744 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431805 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.431755 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.431978 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.432261 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.432375 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.432687 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.432743 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.432709 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.432839 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.432900 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.433055 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.433113 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.433448 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.433505 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.433605 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.433690 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.433748 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.433906 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.434014 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.434118 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.434437 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.434621 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.434778 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.434844 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435090 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435298 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435361 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435456 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435560 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435660 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435737 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435811 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:32 crc kubenswrapper[3552]: E0320 15:26:32.435883 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.529647 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:32 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:32 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:32 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:32 crc kubenswrapper[3552]: I0320 15:26:32.529752 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.429770 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.429822 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.429898 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.429916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.429976 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.429836 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.430208 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.430276 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.430301 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.430880 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.431105 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.431210 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431260 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431305 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.431395 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.431657 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431664 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431826 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.431861 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431911 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.431915 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.431988 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432181 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432261 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.432326 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432394 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432523 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432633 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432725 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.432871 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.433093 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.433160 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:33 crc kubenswrapper[3552]: E0320 15:26:33.433229 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.529537 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:33 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:33 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:33 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:33 crc kubenswrapper[3552]: I0320 15:26:33.529630 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.430271 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.430381 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.430740 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431751 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431801 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431854 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431878 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431855 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431800 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431782 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431955 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431811 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.431932 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.432108 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.432139 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.432345 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.432590 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.432673 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.432973 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.433118 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433246 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433355 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433550 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433675 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.433359 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433441 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.434072 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433473 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.433510 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.434755 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.434857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.433805 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.434330 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.434569 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.435491 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.435506 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.435798 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.435891 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.435540 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.435692 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.436008 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.436055 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.436575 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.436092 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.436152 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.436245 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.436361 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.436489 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.437262 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.437489 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.437741 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.437915 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.438170 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.438344 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.438551 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.438660 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.438750 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:34 crc kubenswrapper[3552]: E0320 15:26:34.438854 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.529391 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:34 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:34 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:34 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:34 crc kubenswrapper[3552]: I0320 15:26:34.530573 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429748 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429820 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429837 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429895 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429976 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429986 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.430037 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.430051 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.429748 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.430277 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.430478 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.430578 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.430637 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.430751 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.430819 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.430871 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.430983 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431080 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.431146 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431234 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.431361 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431533 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431597 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431694 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431793 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.431863 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.431956 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.431985 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.432113 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.432241 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.432337 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.432485 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.432597 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:35 crc kubenswrapper[3552]: E0320 15:26:35.432699 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.529881 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:35 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:35 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:35 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:35 crc kubenswrapper[3552]: I0320 15:26:35.530059 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.430332 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.430675 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.430706 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.430813 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.430998 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431024 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431139 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431165 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431248 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.431629 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.431824 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431890 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431910 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431889 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432038 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432092 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432123 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.431931 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432274 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.432318 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432380 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.432529 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432590 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432618 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.432672 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.432802 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.432952 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.433064 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.433256 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.433316 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.433558 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.433745 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.433862 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.433900 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.434068 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.434260 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.434353 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.434579 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.434784 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.434912 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.435054 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.435144 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.435321 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.435496 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.435766 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.435854 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.435954 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.436222 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.436327 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.436375 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.436377 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.436551 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.437017 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.437216 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.437355 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.437625 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.437803 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:36 crc kubenswrapper[3552]: E0320 15:26:36.437929 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.529983 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:36 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:36 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:36 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:36 crc kubenswrapper[3552]: I0320 15:26:36.530352 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430291 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430325 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430383 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430394 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430397 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430494 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.431186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430590 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430674 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430665 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430694 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430726 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430737 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430584 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430730 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430792 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430797 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.431652 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.430839 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.431985 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.432333 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.432601 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.432764 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.432899 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433076 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433201 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433483 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433621 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433858 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433874 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.433936 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.434060 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.434186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:37 crc kubenswrapper[3552]: E0320 15:26:37.434322 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.530177 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:37 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:37 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:37 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:37 crc kubenswrapper[3552]: I0320 15:26:37.530328 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430583 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430641 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430586 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430907 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430863 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430990 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431007 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430876 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430922 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430935 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431083 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431158 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431165 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.430935 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431205 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431216 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431230 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431057 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431292 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431305 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431211 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431107 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431104 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431025 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431389 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431615 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.431670 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431704 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.431814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.431975 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.432029 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.432458 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.432596 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.432658 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.432742 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.433157 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.433383 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.433612 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.433728 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.433889 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.433969 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.434111 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.434260 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.434447 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.434606 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.434879 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.434932 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435007 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435235 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435472 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435515 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435671 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435843 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.435938 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.436098 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.436246 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:38 crc kubenswrapper[3552]: E0320 15:26:38.436351 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.529980 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:38 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:38 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:38 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:38 crc kubenswrapper[3552]: I0320 15:26:38.530125 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430139 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430236 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430282 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430396 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430444 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430476 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430567 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430585 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430491 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430390 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430438 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.430700 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.431085 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.431144 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.431695 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.431852 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.431880 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.432118 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.432273 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.432480 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.432655 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.432713 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.432745 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433029 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433067 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.433148 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433272 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433481 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433723 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433796 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433898 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.433959 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.434059 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:39 crc kubenswrapper[3552]: E0320 15:26:39.434161 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.530975 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:39 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:39 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:39 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:39 crc kubenswrapper[3552]: I0320 15:26:39.531141 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429558 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429636 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429694 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429714 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429717 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429647 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429765 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429803 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429584 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429920 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429925 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429959 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430012 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430027 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429576 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430056 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429819 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.429835 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.430173 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" podUID="120b38dc-8236-4fa6-a452-642b8ad738ee" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430244 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430296 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430307 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430325 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430432 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430485 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430536 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.430621 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430704 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430798 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.430837 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.430895 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431077 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431187 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" podUID="7d51f445-054a-4e4f-a67b-a828f5a32511" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431241 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-v54bt" podUID="34a48baf-1bee-4921-8bb2-9b7320e76f79" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.431286 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431491 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" podUID="0f394926-bdb9-425c-b36e-264d7fd34550" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431650 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431801 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns/dns-default-gbw49" podUID="13045510-8717-4a71-ade4-be95a76440a7" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.431933 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" podUID="297ab9b6-2186-4d5b-a952-2bfd59af63c4" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432076 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" podUID="f728c15e-d8de-4a9a-a3ea-fdcead95cb91" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432186 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" podUID="cf1a8966-f594-490a-9fbb-eec5bafd13d3" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432323 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432476 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" podUID="9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432592 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" podUID="ed024e5d-8fc2-4c22-803d-73f3c9795f19" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432719 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" podUID="4f8aa612-9da0-4a2b-911e-6a1764a4e74e" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432809 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.432899 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" podUID="59748b9b-c309-4712-aa85-bb38d71c4915" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433036 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433166 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" podUID="12e733dd-0939-4f1b-9cbb-13897e093787" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433265 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433376 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433502 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433620 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433762 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" podUID="71af81a9-7d43-49b2-9287-c375900aa905" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433864 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" podUID="c1620f19-8aa3-45cf-931b-7ae0e5cd14cf" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.433904 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" podUID="0b5c38ff-1fa8-4219-994d-15776acd4a4d" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.434008 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" Mar 20 15:26:40 crc kubenswrapper[3552]: E0320 15:26:40.434121 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" podUID="10603adc-d495-423c-9459-4caa405960bb" Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.530342 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:40 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:40 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:40 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:40 crc kubenswrapper[3552]: I0320 15:26:40.530483 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430031 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430153 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430250 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430262 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430137 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430168 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430385 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.430234 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.434754 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.434828 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.434888 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.435033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.435119 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.435123 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.435581 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" podUID="b54e8941-2fc4-432a-9e51-39684df9089e" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.435606 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.435866 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436241 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.436303 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436342 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.436269 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.436398 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436507 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" podUID="43ae1c37-047b-4ee2-9fee-41e337dd4ac8"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436527 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" podUID="63eb7413-02c3-4d6e-bb48-e5ffe5ce15be"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436691 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qdfr4" podUID="a702c6d2-4dde-4077-ab8c-0f8df804bf7a"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436783 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-ingress-canary/ingress-canary-2vhcn" podUID="0b5d722a-1123-4935-9740-52a08d018bc9"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436909 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.436141 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" podUID="45a8038e-e7f2-4d93-a6f5-7753aa54e63f"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.437074 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" podUID="6d67253e-2acd-4bc1-8185-793587da4f17"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.437175 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" podUID="d0f40333-c860-4c04-8058-a0bf572dcf12"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.437308 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" podUID="d5025cb4-ddb0-4107-88c1-bcbcdb779ac0"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.437448 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-service-ca/service-ca-cd974775-4nsv5" podUID="6639609b-906b-4193-883e-ed1160aa5d50"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.437699 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a"
Mar 20 15:26:41 crc kubenswrapper[3552]: E0320 15:26:41.437824 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" podUID="ebf09b15-4bb1-44bf-9d54-e76fad5cf76e"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.530896 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:41 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:41 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:41 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.530985 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.818919 3552 kubelet_node_status.go:729] "Recording event message for node" node="crc" event="NodeReady"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.819242 3552 kubelet_node_status.go:547] "Fast updating node status as it just became ready"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.882708 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"]
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.882859 3552 topology_manager.go:215] "Topology Admit Handler" podUID="23dd33fe-d710-4638-9e0e-72cb27cb3e84" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29566995-vzn2t"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.884020 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.888905 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.889127 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.889329 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.890230 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.890340 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.910007 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"]
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.940975 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23dd33fe-d710-4638-9e0e-72cb27cb3e84-secret-volume\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.942842 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23dd33fe-d710-4638-9e0e-72cb27cb3e84-config-volume\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:41 crc kubenswrapper[3552]: I0320 15:26:41.943228 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhz99\" (UniqueName: \"kubernetes.io/projected/23dd33fe-d710-4638-9e0e-72cb27cb3e84-kube-api-access-bhz99\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.045761 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23dd33fe-d710-4638-9e0e-72cb27cb3e84-config-volume\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.046271 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bhz99\" (UniqueName: \"kubernetes.io/projected/23dd33fe-d710-4638-9e0e-72cb27cb3e84-kube-api-access-bhz99\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.046451 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23dd33fe-d710-4638-9e0e-72cb27cb3e84-secret-volume\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.048269 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23dd33fe-d710-4638-9e0e-72cb27cb3e84-config-volume\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.059002 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23dd33fe-d710-4638-9e0e-72cb27cb3e84-secret-volume\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.083020 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhz99\" (UniqueName: \"kubernetes.io/projected/23dd33fe-d710-4638-9e0e-72cb27cb3e84-kube-api-access-bhz99\") pod \"collect-profiles-29566995-vzn2t\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.212711 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.429619 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.430547 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.431054 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.431095 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.431144 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.431507 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.431516 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.432976 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.433170 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.433553 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.433613 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.433936 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434050 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434452 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434521 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434672 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434802 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434820 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.434814 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.435254 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.435342 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.435383 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.436035 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.436327 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.436689 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.437204 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.437345 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.437676 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.437659 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.438948 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.439901 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440054 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440107 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440166 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440319 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440398 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440458 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440579 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440714 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440745 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440865 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.440760 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.441166 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-r9fjc"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.441239 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.441500 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.444514 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.444961 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.445872 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.449052 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"webhook-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.449195 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.449263 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.449320 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.449649 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.450167 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.450222 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.450244 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.450336 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.450928 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.451130 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.451396 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.451774 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.452007 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.452171 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.452362 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.452564 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.452846 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.464907 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.466804 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467542 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467596 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467617 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467658 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467676 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467544 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467615 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467780 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467825 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467906 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467999 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468016 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468038 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.467829 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468145 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468252 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468313 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468357 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468447 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468499 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468540 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468619 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468646 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468752 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468782 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.468362 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469049 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469081 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469133 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469227 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469385 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469546 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469553 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469621 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469714 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469751 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.469937 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.471993 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472145 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472316 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472539 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-kpdvz"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472559 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472717 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472760 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472931 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.473056 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.473215 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-58g82"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.473282 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.473223 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-6sd5l"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.472453 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.473880 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.474176 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-ng44q"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.476862 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"]
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.489025 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.493738 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.502630 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.509132 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.509871 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.511198 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.515367 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.517953 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.519278 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.524176 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.524727 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.526767 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.530453 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:42 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:42 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:42 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.530538 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
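The router probe output above is the aggregated healthz format: one [+] or [-] line per named subcheck, a closing "healthz check failed" verdict, and an HTTP 500 that keeps the kubelet's startup probe failing (apparently re-probed about once per second here) until backend-http and has-synced turn ok. A self-contained Go sketch of that aggregation pattern follows; the check names and failure logic are placeholders, not the OpenShift router's actual implementation:

// healthz.go: illustrative sketch of a healthz-style endpoint that renders
// "[+]name ok" / "[-]name failed: reason withheld" per check and returns 500
// until every check passes, matching the probe output captured above.
package main

import (
	"fmt"
	"net/http"
)

type check struct {
	name string
	run  func() error
}

func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for _, c := range checks {
			if err := c.run(); err != nil {
				failed = true
				// Reasons are withheld unless verbose output is requested.
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError) // the 500 the probe sees
			body += "healthz check failed\n"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	synced := false // would flip to true once configuration has synced
	checks := []check{
		{"backend-http", func() error { return fmt.Errorf("not ready") }},
		{"has-synced", func() error {
			if !synced {
				return fmt.Errorf("not synced")
			}
			return nil
		}},
		{"process-running", func() error { return nil }},
	}
	http.HandleFunc("/healthz", healthz(checks))
	http.ListenAndServe(":8080", nil)
}

Because the verdict requires every subcheck to pass, the identical failure blocks that repeat below are expected: the kubelet logs the first bytes of the 500 body on each probe attempt until the router finishes syncing.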
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.530672 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.532605 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.554418 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.573979 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.594101 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.613778 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.633770 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.653119 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.673241 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Mar 20 15:26:42 crc kubenswrapper[3552]: I0320 15:26:42.692610 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.117916 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t" event={"ID":"23dd33fe-d710-4638-9e0e-72cb27cb3e84","Type":"ContainerStarted","Data":"87f51db12651656002390720995a7715c0ad623eb8ffece9221a44d3609130bf"}
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.118370 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t" event={"ID":"23dd33fe-d710-4638-9e0e-72cb27cb3e84","Type":"ContainerStarted","Data":"b91ee88408528727b0e8e9d9a443bb2de94a8143bb4c3b956233df87560a67c0"}
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.429891 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.429989 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430036 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430101 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430127 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.429890 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.429940 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430242 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430270 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430356 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430359 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430465 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430457 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430458 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.430515 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.436505 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.437780 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.443680 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.444120 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.443786 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.443861 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.443942 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.444128 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.446292 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.446582 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.446953 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.447243 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.447263 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.447390 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.448920 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.448924 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.448944 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-79vsd"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.450520 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.450800 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.452501 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.452689 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.453041 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.453485 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.453523 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.453745 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.453821 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.454301 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.454826 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.455098 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-dwn4s"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.455771 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.455925 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.456023 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.456133 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.456558 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-twmwc"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.456629 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-q786x"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.465510 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.468122 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.472848 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-sv888"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.476617 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.493227 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.512814 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.530115 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:43 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:43 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:43 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.530206 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.533912 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.554425 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Mar 20 15:26:43 crc kubenswrapper[3552]: I0320 15:26:43.573257 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Mar 20 15:26:44 crc kubenswrapper[3552]: I0320 15:26:44.529297 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:44 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:44 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:44 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:44 crc kubenswrapper[3552]: I0320 15:26:44.529435 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:45 crc kubenswrapper[3552]: I0320 15:26:45.127655 3552 generic.go:334] "Generic (PLEG): container finished" podID="23dd33fe-d710-4638-9e0e-72cb27cb3e84" containerID="87f51db12651656002390720995a7715c0ad623eb8ffece9221a44d3609130bf" exitCode=0
Mar 20 15:26:45 crc kubenswrapper[3552]: I0320 15:26:45.127746 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t" event={"ID":"23dd33fe-d710-4638-9e0e-72cb27cb3e84","Type":"ContainerDied","Data":"87f51db12651656002390720995a7715c0ad623eb8ffece9221a44d3609130bf"}
Mar 20 15:26:45 crc kubenswrapper[3552]: I0320 15:26:45.530597 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:45 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:45 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:45 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:45 crc kubenswrapper[3552]: I0320 15:26:45.530700 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.373347 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.519224 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhz99\" (UniqueName: \"kubernetes.io/projected/23dd33fe-d710-4638-9e0e-72cb27cb3e84-kube-api-access-bhz99\") pod \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") "
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.519295 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23dd33fe-d710-4638-9e0e-72cb27cb3e84-secret-volume\") pod \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") "
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.519358 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23dd33fe-d710-4638-9e0e-72cb27cb3e84-config-volume\") pod \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\" (UID: \"23dd33fe-d710-4638-9e0e-72cb27cb3e84\") "
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.520456 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23dd33fe-d710-4638-9e0e-72cb27cb3e84-config-volume" (OuterVolumeSpecName: "config-volume") pod "23dd33fe-d710-4638-9e0e-72cb27cb3e84" (UID: "23dd33fe-d710-4638-9e0e-72cb27cb3e84"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.520785 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23dd33fe-d710-4638-9e0e-72cb27cb3e84-config-volume\") on node \"crc\" DevicePath \"\""
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.527431 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23dd33fe-d710-4638-9e0e-72cb27cb3e84-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "23dd33fe-d710-4638-9e0e-72cb27cb3e84" (UID: "23dd33fe-d710-4638-9e0e-72cb27cb3e84"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.527670 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23dd33fe-d710-4638-9e0e-72cb27cb3e84-kube-api-access-bhz99" (OuterVolumeSpecName: "kube-api-access-bhz99") pod "23dd33fe-d710-4638-9e0e-72cb27cb3e84" (UID: "23dd33fe-d710-4638-9e0e-72cb27cb3e84"). InnerVolumeSpecName "kube-api-access-bhz99". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.531033 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:46 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:46 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:46 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.531142 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.624683 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-bhz99\" (UniqueName: \"kubernetes.io/projected/23dd33fe-d710-4638-9e0e-72cb27cb3e84-kube-api-access-bhz99\") on node \"crc\" DevicePath \"\""
Mar 20 15:26:46 crc kubenswrapper[3552]: I0320 15:26:46.624748 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23dd33fe-d710-4638-9e0e-72cb27cb3e84-secret-volume\") on node \"crc\" DevicePath \"\""
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.138457 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t" event={"ID":"23dd33fe-d710-4638-9e0e-72cb27cb3e84","Type":"ContainerDied","Data":"b91ee88408528727b0e8e9d9a443bb2de94a8143bb4c3b956233df87560a67c0"}
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.138513 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.138522 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b91ee88408528727b0e8e9d9a443bb2de94a8143bb4c3b956233df87560a67c0"
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.465072 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl"]
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.470758 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555385-rxkwl"]
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.529631 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:47 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:47 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:47 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:47 crc kubenswrapper[3552]: I0320 15:26:47.529746 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:48 crc kubenswrapper[3552]: I0320 15:26:48.530495 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:48 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:48 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:48 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:48 crc kubenswrapper[3552]: I0320 15:26:48.530584 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:49 crc kubenswrapper[3552]: I0320 15:26:49.440190 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a90eb6a-7fc8-4544-b6a7-731623a1fcf6" path="/var/lib/kubelet/pods/3a90eb6a-7fc8-4544-b6a7-731623a1fcf6/volumes"
Mar 20 15:26:49 crc kubenswrapper[3552]: I0320 15:26:49.529815 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:49 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:49 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:49 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:49 crc kubenswrapper[3552]: I0320 15:26:49.529888 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:50 crc kubenswrapper[3552]: I0320 15:26:50.530539 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:50 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:50 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:50 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:50 crc kubenswrapper[3552]: I0320 15:26:50.530646 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:51 crc kubenswrapper[3552]: I0320 15:26:51.531002 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:51 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:51 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:51 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:51 crc kubenswrapper[3552]: I0320 15:26:51.531103 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:52 crc kubenswrapper[3552]: I0320 15:26:52.530150 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:52 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:52 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:52 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:52 crc kubenswrapper[3552]: I0320 15:26:52.530301 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:53 crc kubenswrapper[3552]: I0320 15:26:53.530008 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:53 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:53 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:53 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:53 crc kubenswrapper[3552]: I0320 15:26:53.530166 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:54 crc kubenswrapper[3552]: I0320 15:26:54.529925 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:54 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:54 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:54 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:54 crc kubenswrapper[3552]: I0320 15:26:54.530039 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:55 crc kubenswrapper[3552]: I0320 15:26:55.530344 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:55 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:55 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:55 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:55 crc kubenswrapper[3552]: I0320 15:26:55.530557 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:56 crc kubenswrapper[3552]: I0320 15:26:56.531219 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:56 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:56 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:56 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:56 crc kubenswrapper[3552]: I0320 15:26:56.531340 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:57 crc kubenswrapper[3552]: I0320 15:26:57.530793 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:57 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:57 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:57 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:57 crc kubenswrapper[3552]: I0320 15:26:57.531117 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Mar 20 15:26:58 crc kubenswrapper[3552]: I0320 15:26:58.529737 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Mar 20 15:26:58 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld
Mar 20 15:26:58 crc kubenswrapper[3552]: [+]process-running ok
Mar 20 15:26:58 crc kubenswrapper[3552]: healthz check failed
Mar 20 15:26:58 crc kubenswrapper[3552]: I0320
15:26:58.529826 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:26:58 crc kubenswrapper[3552]: I0320 15:26:58.774028 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:26:59 crc kubenswrapper[3552]: I0320 15:26:59.529825 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:26:59 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:26:59 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:26:59 crc kubenswrapper[3552]: healthz check failed Mar 20 15:26:59 crc kubenswrapper[3552]: I0320 15:26:59.529917 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:00 crc kubenswrapper[3552]: I0320 15:27:00.529453 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:00 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:00 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:00 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:00 crc kubenswrapper[3552]: I0320 15:27:00.529573 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.267625 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.267705 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.267742 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.267766 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.267781 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:27:01 crc kubenswrapper[3552]: E0320 15:27:01.406690 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574\": container with ID starting with 1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574 not found: ID does not exist" containerID="1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574" Mar 20 15:27:01 crc kubenswrapper[3552]: 
I0320 15:27:01.406768 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574" err="rpc error: code = NotFound desc = could not find container \"1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574\": container with ID starting with 1fe939e5830b08618b43895629c3533698f95ce7f05b7d0cd39aaf3a1b886574 not found: ID does not exist" Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.529881 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:01 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:01 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:01 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:01 crc kubenswrapper[3552]: I0320 15:27:01.530248 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:02 crc kubenswrapper[3552]: I0320 15:27:02.530559 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:02 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:02 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:02 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:02 crc kubenswrapper[3552]: I0320 15:27:02.530666 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:03 crc kubenswrapper[3552]: I0320 15:27:03.529832 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:03 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:03 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:03 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:03 crc kubenswrapper[3552]: I0320 15:27:03.530009 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:04 crc kubenswrapper[3552]: I0320 15:27:04.530197 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:04 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:04 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:04 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:04 crc kubenswrapper[3552]: I0320 15:27:04.530596 3552 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:05 crc kubenswrapper[3552]: I0320 15:27:05.530125 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:05 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:05 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:05 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:05 crc kubenswrapper[3552]: I0320 15:27:05.530358 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:06 crc kubenswrapper[3552]: I0320 15:27:06.530681 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:06 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:06 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:06 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:06 crc kubenswrapper[3552]: I0320 15:27:06.530798 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:07 crc kubenswrapper[3552]: I0320 15:27:07.530032 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:07 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:07 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:07 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:07 crc kubenswrapper[3552]: I0320 15:27:07.530325 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:08 crc kubenswrapper[3552]: I0320 15:27:08.529439 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:08 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:08 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:08 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:08 crc kubenswrapper[3552]: I0320 15:27:08.529577 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:09 crc 
kubenswrapper[3552]: I0320 15:27:09.530106 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:09 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:09 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:09 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:09 crc kubenswrapper[3552]: I0320 15:27:09.531717 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:10 crc kubenswrapper[3552]: I0320 15:27:10.529551 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:10 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:10 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:10 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:10 crc kubenswrapper[3552]: I0320 15:27:10.529636 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:11 crc kubenswrapper[3552]: I0320 15:27:11.529336 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:11 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:11 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:11 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:11 crc kubenswrapper[3552]: I0320 15:27:11.529424 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:12 crc kubenswrapper[3552]: I0320 15:27:12.530083 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:12 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:12 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:12 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:12 crc kubenswrapper[3552]: I0320 15:27:12.530176 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:13 crc kubenswrapper[3552]: I0320 15:27:13.531459 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 
500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:13 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:13 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:13 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:13 crc kubenswrapper[3552]: I0320 15:27:13.531563 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:14 crc kubenswrapper[3552]: I0320 15:27:14.530832 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:14 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:14 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:14 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:14 crc kubenswrapper[3552]: I0320 15:27:14.530960 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:15 crc kubenswrapper[3552]: I0320 15:27:15.531437 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:15 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:15 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:15 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:15 crc kubenswrapper[3552]: I0320 15:27:15.533628 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:16 crc kubenswrapper[3552]: I0320 15:27:16.530758 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:16 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:16 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:16 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:16 crc kubenswrapper[3552]: I0320 15:27:16.530974 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:17 crc kubenswrapper[3552]: I0320 15:27:17.530364 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:17 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:17 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:17 crc 
kubenswrapper[3552]: healthz check failed Mar 20 15:27:17 crc kubenswrapper[3552]: I0320 15:27:17.530533 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:18 crc kubenswrapper[3552]: I0320 15:27:18.529380 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:18 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:18 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:18 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:18 crc kubenswrapper[3552]: I0320 15:27:18.529593 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:19 crc kubenswrapper[3552]: I0320 15:27:19.529800 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:19 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:19 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:19 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:19 crc kubenswrapper[3552]: I0320 15:27:19.529902 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:20 crc kubenswrapper[3552]: I0320 15:27:20.529634 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:20 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:20 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:20 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:20 crc kubenswrapper[3552]: I0320 15:27:20.529748 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:21 crc kubenswrapper[3552]: I0320 15:27:21.530900 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:21 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:21 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:21 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:21 crc kubenswrapper[3552]: I0320 15:27:21.531017 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" 
podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:22 crc kubenswrapper[3552]: I0320 15:27:22.529358 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:22 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:22 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:22 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:22 crc kubenswrapper[3552]: I0320 15:27:22.529572 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:23 crc kubenswrapper[3552]: I0320 15:27:23.529541 3552 patch_prober.go:28] interesting pod/router-default-5c9bf7bc58-6jctv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Mar 20 15:27:23 crc kubenswrapper[3552]: [-]has-synced failed: reason withheld Mar 20 15:27:23 crc kubenswrapper[3552]: [+]process-running ok Mar 20 15:27:23 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:23 crc kubenswrapper[3552]: I0320 15:27:23.529652 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:23 crc kubenswrapper[3552]: I0320 15:27:23.529720 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:27:23 crc kubenswrapper[3552]: I0320 15:27:23.531603 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"f072010141e0432a82d0cd5bce4ef78ec3ed40c5f8bb481a2055e25005db596d"} pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" containerMessage="Container router failed startup probe, will be restarted" Mar 20 15:27:23 crc kubenswrapper[3552]: I0320 15:27:23.531670 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" podUID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerName="router" containerID="cri-o://f072010141e0432a82d0cd5bce4ef78ec3ed40c5f8bb481a2055e25005db596d" gracePeriod=3600 Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.173894 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.174546 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " 
pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.174762 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.174854 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.175002 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.175075 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.175151 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.177050 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.179392 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.179887 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.180146 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.184913 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.185792 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/120b38dc-8236-4fa6-a452-642b8ad738ee-images\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.189599 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-config\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.189976 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.192062 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.199921 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-audit\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.200890 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-trusted-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.201703 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-image-import-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.202571 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.208544 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-session\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.278103 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.278595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279158 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279218 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279264 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279310 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279351 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279400 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279482 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279527 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279592 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod 
\"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279636 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279684 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.279738 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: E0320 15:27:30.280210 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97 podName: nodeName:}" failed. No retries permitted until 2026-03-20 15:29:32.280189636 +0000 UTC m=+271.973886506 (durationBeforeRetry 2m2s). 
Error: MountVolume.MountDevice failed for volume "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97") pod "image-registry-6fbd648f87-j4bk5" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.282291 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.282717 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.282775 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.283342 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.283500 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.283513 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.283742 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.283795 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.283800 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.284944 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.285210 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.285353 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.285100 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.291731 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.292325 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod 
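The nestedpendingoperations error above shows two things: the CSI driver kubevirt.io.hostpath-provisioner has not yet registered with the kubelet, so MountDevice cannot build a driver client, and the failed mount is rescheduled on a capped exponential backoff, with "durationBeforeRetry 2m2s" indicating the operation has already reached the cap. Below is a Go sketch of that doubling-with-cap schedule; the initial 500ms value is an assumption in the spirit of the kubelet's volume retry logic, and only the 2m2s cap is confirmed by the log line itself.

package main

import (
	"fmt"
	"time"
)

// Assumed backoff constants; only the 2m2s cap appears in the log above.
const (
	initialDurationBeforeRetry = 500 * time.Millisecond
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
)

func main() {
	d := initialDurationBeforeRetry
	for attempt := 1; ; attempt++ {
		fmt.Printf("attempt %2d: wait %v before retrying MountDevice\n", attempt, d)
		if d >= maxDurationBeforeRetry {
			break // later failures keep waiting the full 2m2s, as in the log
		}
		d *= 2
		if d > maxDurationBeforeRetry {
			d = maxDurationBeforeRetry
		}
	}
}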
\"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.292331 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.292811 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71af81a9-7d43-49b2-9287-c375900aa905-config\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.293029 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-service-ca-bundle\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.293054 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-config\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.293057 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-config\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.293554 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed024e5d-8fc2-4c22-803d-73f3c9795f19-config\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.295025 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.295708 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-error\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.296686 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-idp-0-file-data\") 
pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.296876 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-serving-cert\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.297817 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed024e5d-8fc2-4c22-803d-73f3c9795f19-kube-api-access\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.308050 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvn6z\" (UniqueName: \"kubernetes.io/projected/6639609b-906b-4193-883e-ed1160aa5d50-kube-api-access-pvn6z\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380372 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380479 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380535 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380565 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380594 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380621 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.380644 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.381775 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.381974 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.382139 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.382334 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.382512 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.382654 3552 reconciler_common.go:231] "operationExecutor.MountVolume 
started for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.382770 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.382878 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.383085 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.383259 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384075 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384345 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384450 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384586 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for 
volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384706 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388561 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388768 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384588 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388618 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384783 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384346 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384836 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384884 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.384923 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.385117 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.385163 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.387520 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvc4r\" (UniqueName: \"kubernetes.io/projected/c085412c-b875-46c9-ae3e-e6b0d8067091-kube-api-access-tvc4r\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.387578 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.387734 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.387751 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388194 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388290 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388344 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388360 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388421 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388450 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.388930 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390496 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390524 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: \"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390560 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390582 
3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390606 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390628 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.390937 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.391155 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.391262 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.391359 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.391648 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.391950 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: 
\"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392127 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392361 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392467 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392573 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392622 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392672 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392724 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392767 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " 
pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392811 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392855 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392899 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392944 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.392991 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393047 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393106 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393198 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393265 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393314 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393359 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393404 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393476 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393527 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393572 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393611 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393687 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod 
\"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393758 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393808 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393854 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393901 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393943 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.393985 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.394039 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.394119 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: 
\"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.394176 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.394221 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.394622 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjlhw\" (UniqueName: \"kubernetes.io/projected/bd556935-a077-45df-ba3f-d42c39326ccd-kube-api-access-hjlhw\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.395322 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-serving-cert\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.396803 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.396876 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.396878 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-config\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.396923 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.396971 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397021 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397065 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397113 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397188 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397236 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397278 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397319 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397365 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod 
\"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397433 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397481 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.397944 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.398104 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.398343 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.398507 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.398764 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.399184 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.399448 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.404475 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-images\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.405332 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-serving-cert\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.405625 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.406012 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.406157 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed024e5d-8fc2-4c22-803d-73f3c9795f19-serving-cert\") pod \"kube-apiserver-operator-78d54458c4-sc8h7\" (UID: \"ed024e5d-8fc2-4c22-803d-73f3c9795f19\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.407594 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.409360 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrd8h\" (UniqueName: \"kubernetes.io/projected/8a5ae51d-d173-4531-8975-f164c975ce1f-kube-api-access-wrd8h\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.410012 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-config\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.410580 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-serving-cert\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.411392 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-trusted-ca-bundle\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.412058 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-serving-cert\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.412536 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.412912 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.413151 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kgvs\" (UniqueName: \"kubernetes.io/projected/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-kube-api-access-6kgvs\") pod 
\"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.413405 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.413988 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.414195 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.414464 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b54e8941-2fc4-432a-9e51-39684df9089e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.414513 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.415523 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.415773 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6639609b-906b-4193-883e-ed1160aa5d50-signing-key\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.415787 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.416300 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.417735 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71af81a9-7d43-49b2-9287-c375900aa905-serving-cert\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.418062 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.418290 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.418357 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 
15:27:30.418724 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-router-certs\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.418787 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.419044 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.419108 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d51f445-054a-4e4f-a67b-a828f5a32511-metrics-tls\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.419168 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.419332 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm986\" (UniqueName: \"kubernetes.io/projected/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-kube-api-access-bm986\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.421883 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-55f7t\" (UniqueName: \"kubernetes.io/projected/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-kube-api-access-55f7t\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.422455 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.423245 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kp86\" (UniqueName: \"kubernetes.io/projected/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-kube-api-access-6kp86\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.426991 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-profile-collector-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.429422 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Mar 20 15:27:30 crc 
kubenswrapper[3552]: I0320 15:27:30.430081 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f394926-bdb9-425c-b36e-264d7fd34550-config\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.430446 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/530553aa-0a1d-423e-8a22-f5eb4bdbb883-serving-cert\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.430820 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w8wh\" (UniqueName: \"kubernetes.io/projected/5bacb25d-97b6-4491-8fb4-99feae1d802a-kube-api-access-4w8wh\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431051 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431275 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431309 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13045510-8717-4a71-ade4-be95a76440a7-config-volume\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431333 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431456 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431459 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431835 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-serving-cert\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.431844 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432120 3552 reflector.go:351] Caches populated for *v1.ConfigMap from 
object-"openshift-etcd-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432213 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432251 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432608 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432627 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432657 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/297ab9b6-2186-4d5b-a952-2bfd59af63c4-proxy-tls\") pod \"machine-config-controller-6df6df6b6b-58shh\" (UID: \"297ab9b6-2186-4d5b-a952-2bfd59af63c4\") " pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432718 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6639609b-906b-4193-883e-ed1160aa5d50-signing-cabundle\") pod \"service-ca-cd974775-4nsv5\" (UID: \"6639609b-906b-4193-883e-ed1160aa5d50\") " pod="openshift-service-ca/service-ca-cd974775-4nsv5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432863 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433012 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433217 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433257 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433252 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433441 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433592 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.433905 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.432389 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434036 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Mar 20 15:27:30 crc 
kubenswrapper[3552]: I0320 15:27:30.434129 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434229 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434664 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434682 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-service-ca\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434716 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434756 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-config\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434779 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434856 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434884 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434901 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.434935 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13045510-8717-4a71-ade4-be95a76440a7-metrics-tls\") pod \"dns-default-gbw49\" (UID: \"13045510-8717-4a71-ade4-be95a76440a7\") " pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.435064 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.435230 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-config\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.435469 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-encryption-config\") pod 
\"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.436053 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.436247 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.436388 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f728c15e-d8de-4a9a-a3ea-fdcead95cb91-samples-operator-tls\") pod \"cluster-samples-operator-bc474d5d6-wshwg\" (UID: \"f728c15e-d8de-4a9a-a3ea-fdcead95cb91\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.436753 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.436873 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.436937 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.437000 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.437786 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.438061 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-config\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.438324 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4f8aa612-9da0-4a2b-911e-6a1764a4e74e-machine-api-operator-tls\") pod \"machine-api-operator-788b7c6b6c-ctdmb\" (UID: \"4f8aa612-9da0-4a2b-911e-6a1764a4e74e\") " pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.439145 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"certified-operators-7287f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.439248 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod 
\"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.439569 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.439905 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-service-ca\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.442431 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"community-operators-8jhz6\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.441120 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-package-server-manager-serving-cert\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.441122 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3992789-6f8b-4806-8ce0-261a7623ca46-serving-cert\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.441906 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d67253e-2acd-4bc1-8185-793587da4f17-config\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.442734 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.442239 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rpl7\" (UniqueName: \"kubernetes.io/projected/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-kube-api-access-5rpl7\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.442855 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-ca\") pod 
\"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.442877 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7-config\") pod \"kube-storage-version-migrator-operator-686c6c748c-qbnnr\" (UID: \"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.443102 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"redhat-marketplace-8s8pc\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.443316 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.443352 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.444560 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.445306 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.446173 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.446452 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71af81a9-7d43-49b2-9287-c375900aa905-kube-api-access\") pod \"openshift-kube-scheduler-operator-5d9b995f6b-fcgd7\" (UID: \"71af81a9-7d43-49b2-9287-c375900aa905\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.446549 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-encryption-config\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.447304 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"redhat-operators-f4jkp\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.447615 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1620f19-8aa3-45cf-931b-7ae0e5cd14cf-kube-api-access\") pod \"kube-controller-manager-operator-6f6cb54958-rbddb\" (UID: \"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.448516 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7zrh\" (UniqueName: \"kubernetes.io/projected/ebf09b15-4bb1-44bf-9d54-e76fad5cf76e-kube-api-access-j7zrh\") pod \"authentication-operator-7cc7ff75d5-g9qv8\" (UID: \"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e\") " pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.448988 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.449643 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.451074 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/10603adc-d495-423c-9459-4caa405960bb-metrics-tls\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.451155 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-76gl8\" (UniqueName: \"kubernetes.io/projected/34a48baf-1bee-4921-8bb2-9b7320e76f79-kube-api-access-76gl8\") pod \"network-check-target-v54bt\" (UID: 
\"34a48baf-1bee-4921-8bb2-9b7320e76f79\") " pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.451877 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.452455 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.453690 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d67253e-2acd-4bc1-8185-793587da4f17-serving-cert\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.454074 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.454459 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.454621 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.454797 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.455604 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.455981 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.456515 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/45a8038e-e7f2-4d93-a6f5-7753aa54e63f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-649bd778b4-tt5tw\" (UID: \"45a8038e-e7f2-4d93-a6f5-7753aa54e63f\") " pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.457062 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9vhj\" (UniqueName: \"kubernetes.io/projected/6d67253e-2acd-4bc1-8185-793587da4f17-kube-api-access-d9vhj\") pod \"service-ca-operator-546b4f8984-pwccz\" (UID: \"6d67253e-2acd-4bc1-8185-793587da4f17\") " pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.457481 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-client-ca\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.457617 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-login\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.458240 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.458572 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a702c6d2-4dde-4077-ab8c-0f8df804bf7a-metrics-certs\") pod \"network-metrics-daemon-qdfr4\" (UID: \"a702c6d2-4dde-4077-ab8c-0f8df804bf7a\") " pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.458927 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9724w\" (UniqueName: \"kubernetes.io/projected/0b5c38ff-1fa8-4219-994d-15776acd4a4d-kube-api-access-9724w\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.459378 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zpsk\" (UniqueName: \"kubernetes.io/projected/6268b7fe-8910-4505-b404-6f1df638105c-kube-api-access-2zpsk\") pod \"downloads-65476884b9-9wcvx\" (UID: \"6268b7fe-8910-4505-b404-6f1df638105c\") " pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.460465 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9kp4\" (UniqueName: \"kubernetes.io/projected/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-kube-api-access-g9kp4\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.460560 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f394926-bdb9-425c-b36e-264d7fd34550-serving-cert\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.460668 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-trusted-ca-bundle\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.460770 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-etcd-client\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " 
pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.460825 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9127708-ccfd-4891-8a3a-f0cacb77e0f4-serving-cert\") pod \"console-operator-5dbbc74dc9-cp5cd\" (UID: \"e9127708-ccfd-4891-8a3a-f0cacb77e0f4\") " pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.461036 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d5025cb4-ddb0-4107-88c1-bcbcdb779ac0-webhook-certs\") pod \"multus-admission-controller-6c7c885997-4hbbc\" (UID: \"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0\") " pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.462052 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.462067 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-client\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.462834 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt5cx\" (UniqueName: \"kubernetes.io/projected/0b5d722a-1123-4935-9740-52a08d018bc9-kube-api-access-dt5cx\") pod \"ingress-canary-2vhcn\" (UID: \"0b5d722a-1123-4935-9740-52a08d018bc9\") " pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.463012 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl5kg\" (UniqueName: \"kubernetes.io/projected/7d51f445-054a-4e4f-a67b-a828f5a32511-kube-api-access-tl5kg\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.463921 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqmhq\" (UniqueName: \"kubernetes.io/projected/cf1a8966-f594-490a-9fbb-eec5bafd13d3-kube-api-access-hqmhq\") pod \"migrator-f7c6d88df-q2fnv\" (UID: \"cf1a8966-f594-490a-9fbb-eec5bafd13d3\") " pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.464553 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.464825 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 
15:27:30.466828 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.467238 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.476303 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.479622 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7d51f445-054a-4e4f-a67b-a828f5a32511-trusted-ca\") pod \"ingress-operator-7d46d5bb6d-rrg6t\" (UID: \"7d51f445-054a-4e4f-a67b-a828f5a32511\") " pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.479844 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.487892 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrvxn\" (UniqueName: \"kubernetes.io/projected/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-kube-api-access-vrvxn\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.498362 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.498833 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.500123 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 
15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.500294 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.500501 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.500676 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.500822 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.500946 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501061 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501173 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: \"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501298 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501092 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501648 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501676 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501697 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501718 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501737 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501758 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501782 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501800 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501821 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501843 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.501873 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.503020 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"marketplace-operator-8b455464d-f9xdt\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.503968 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b54e8941-2fc4-432a-9e51-39684df9089e-trusted-ca\") pod \"cluster-image-registry-operator-7769bd8d7d-q5cvv\" (UID: \"b54e8941-2fc4-432a-9e51-39684df9089e\") " pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.504495 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-profile-collector-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.504560 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcxcp\" (UniqueName: \"kubernetes.io/projected/d0f40333-c860-4c04-8058-a0bf572dcf12-kube-api-access-qcxcp\") pod \"network-check-source-5c5478f8c-vqvt7\" (UID: 
\"d0f40333-c860-4c04-8058-a0bf572dcf12\") " pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.506444 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqnmc\" (UniqueName: \"kubernetes.io/projected/59748b9b-c309-4712-aa85-bb38d71c4915-kube-api-access-fqnmc\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.516737 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.516978 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.536752 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.543752 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bab054c9-6c83-40ee-896d-6459b22a6b4b-etcd-serving-ca\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.551225 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.556579 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.569239 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-webhook-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.569269 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd556935-a077-45df-ba3f-d42c39326ccd-apiservice-cert\") pod \"packageserver-8464bcc55b-sjnqz\" (UID: \"bd556935-a077-45df-ba3f-d42c39326ccd\") " pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.576226 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.582819 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b61ce6b0-a70f-42b7-9435-3d6acba81ccf-audit-policies\") pod \"oauth-openshift-6499cf79cf-qdfbh\" (UID: \"b61ce6b0-a70f-42b7-9435-3d6acba81ccf\") " pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.599584 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2vhcn" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.599717 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.602539 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.602576 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.602598 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.605121 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c085412c-b875-46c9-ae3e-e6b0d8067091-srv-cert\") pod \"olm-operator-6d8474f75f-x54mh\" (UID: \"c085412c-b875-46c9-ae3e-e6b0d8067091\") " pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.613644 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.617251 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.619721 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qdfr4" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.627832 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120b38dc-8236-4fa6-a452-642b8ad738ee-proxy-tls\") pod \"machine-config-operator-76788bff89-wkjgm\" (UID: \"120b38dc-8236-4fa6-a452-642b8ad738ee\") " pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.628443 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.638080 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.638374 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.646046 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.646541 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2-client-ca\") pod \"route-controller-manager-6f75dd68cc-gcdzx\" (UID: \"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2\") " pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.654500 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.661080 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.663850 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d3992789-6f8b-4806-8ce0-261a7623ca46-proxy-ca-bundles\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.671390 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.679666 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.681759 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.685193 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5ae51d-d173-4531-8975-f164c975ce1f-srv-cert\") pod \"catalog-operator-857456c46-7f5wf\" (UID: \"8a5ae51d-d173-4531-8975-f164c975ce1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.686315 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.690978 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" Mar 20 15:27:30 crc kubenswrapper[3552]: W0320 15:27:30.693749 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f8aa612_9da0_4a2b_911e_6a1764a4e74e.slice/crio-b98bc433b1f7b72f2426a9796dfbbdb4144328cf20880dc509e6ba6055a570b4 WatchSource:0}: Error finding container b98bc433b1f7b72f2426a9796dfbbdb4144328cf20880dc509e6ba6055a570b4: Status 404 returned error can't find the container with id b98bc433b1f7b72f2426a9796dfbbdb4144328cf20880dc509e6ba6055a570b4 Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.696221 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console-operator"/"webhook-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.697779 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.703432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.703490 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.704740 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.712965 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-serving-cert\" (UniqueName: \"kubernetes.io/secret/59748b9b-c309-4712-aa85-bb38d71c4915-webhook-serving-cert\") pod \"console-conversion-webhook-595f9969b-l6z49\" (UID: \"59748b9b-c309-4712-aa85-bb38d71c4915\") " pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.713278 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.715363 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5d97\" (UniqueName: \"kubernetes.io/projected/63eb7413-02c3-4d6e-bb48-e5ffe5ce15be-kube-api-access-x5d97\") pod \"package-server-manager-84d578d794-jw7r2\" (UID: \"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be\") " pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.716386 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.717568 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.717882 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.725293 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b5c38ff-1fa8-4219-994d-15776acd4a4d-serving-cert\") pod \"etcd-operator-768d5b5d86-722mg\" (UID: \"0b5c38ff-1fa8-4219-994d-15776acd4a4d\") " pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.735626 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.735722 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.738370 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.756085 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.758889 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.765657 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-client\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.776265 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.796384 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.801995 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"console-8568c59db8-fspjn\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.807507 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.812567 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-serving-cert\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.820369 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.836760 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.842883 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.843822 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-audit-policies\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.844629 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5bacb25d-97b6-4491-8fb4-99feae1d802a-etcd-serving-ca\") pod \"apiserver-69c565c9b6-vbdpd\" (UID: \"5bacb25d-97b6-4491-8fb4-99feae1d802a\") " pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.856510 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.863406 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvtrv\" (UniqueName: \"kubernetes.io/projected/12e733dd-0939-4f1b-9cbb-13897e093787-kube-api-access-vvtrv\") pod \"csi-hostpathplugin-hvm8g\" (UID: \"12e733dd-0939-4f1b-9cbb-13897e093787\") " pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.876393 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.881702 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.884775 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx2h9\" (UniqueName: \"kubernetes.io/projected/43ae1c37-047b-4ee2-9fee-41e337dd4ac8-kube-api-access-lx2h9\") pod \"openshift-apiserver-operator-7c88c4c865-kn67m\" (UID: \"43ae1c37-047b-4ee2-9fee-41e337dd4ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.891384 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.896030 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-dwn4s"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.898447 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp"
Mar 20 15:27:30 crc kubenswrapper[3552]: W0320 15:27:30.910016 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34a48baf_1bee_4921_8bb2_9b7320e76f79.slice/crio-221b49cdb410185bdeac4f730133dd0fa4ff8788eb088296b2159cfe37221ff5 WatchSource:0}: Error finding container 221b49cdb410185bdeac4f730133dd0fa4ff8788eb088296b2159cfe37221ff5: Status 404 returned error can't find the container with id 221b49cdb410185bdeac4f730133dd0fa4ff8788eb088296b2159cfe37221ff5
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.916427 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Mar 20 15:27:30 crc kubenswrapper[3552]: W0320 15:27:30.930017 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d51f445_054a_4e4f_a67b_a828f5a32511.slice/crio-e1c81343f1802a9af989d03bd718418d51ff1658210877ea66e355cfa2e34a17 WatchSource:0}: Error finding container e1c81343f1802a9af989d03bd718418d51ff1658210877ea66e355cfa2e34a17: Status 404 returned error can't find the container with id e1c81343f1802a9af989d03bd718418d51ff1658210877ea66e355cfa2e34a17
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.939698 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.959615 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.963110 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.970206 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.976253 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-79vsd"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.986501 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-cd974775-4nsv5"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.992631 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2"
Mar 20 15:27:30 crc kubenswrapper[3552]: I0320 15:27:30.996445 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-kpdvz"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.000573 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc"
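The manager.go:1169 warnings above come from cAdvisor: it notices a freshly created cgroup before the runtime has finished registering the matching container, so the lookup by ID briefly returns 404. During a mass pod start like this one they are usually harmless noise that resolves on the next relist. The hex ID cAdvisor is searching for is embedded in the cgroup path itself; a minimal standalone sketch for recovering it (containerIDFromCgroup is a hypothetical helper, not kubelet code):

package main

import (
	"fmt"
	"strings"
)

// containerIDFromCgroup returns the hex ID that follows the "crio-" prefix
// in the last path element of a cgroup event name, or "" if there is none.
func containerIDFromCgroup(path string) string {
	last := path[strings.LastIndex(path, "/")+1:]
	id, ok := strings.CutPrefix(last, "crio-") // Go 1.20+
	if !ok {
		return ""
	}
	return id
}

func main() {
	p := "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34a48baf_1bee_4921_8bb2_9b7320e76f79.slice/crio-221b49cdb410185bdeac4f730133dd0fa4ff8788eb088296b2159cfe37221ff5"
	fmt.Println(containerIDFromCgroup(p)) // the ID the 404 warning above complains about
}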
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.019350 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-6sd5l"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.027334 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.027746 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.037697 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-9r4gl"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.040286 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.058729 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-twmwc"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.064015 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7287f"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.076323 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.077273 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.105784 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-sv888"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.117526 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.119774 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.128563 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8bxr\" (UniqueName: \"kubernetes.io/projected/0f394926-bdb9-425c-b36e-264d7fd34550-kube-api-access-l8bxr\") pod \"openshift-controller-manager-operator-7978d7d7f6-2nt8z\" (UID: \"0f394926-bdb9-425c-b36e-264d7fd34550\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.137305 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.148442 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf4t2\" (UniqueName: \"kubernetes.io/projected/10603adc-d495-423c-9459-4caa405960bb-kube-api-access-nf4t2\") pod \"dns-operator-75f687757b-nz2xb\" (UID: \"10603adc-d495-423c-9459-4caa405960bb\") " pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.159713 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-ng44q"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.160839 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.176932 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.196155 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.200725 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-czvnk\" (UniqueName: \"kubernetes.io/projected/d3992789-6f8b-4806-8ce0-261a7623ca46-kube-api-access-czvnk\") pod \"controller-manager-7fdc5fd4dd-zdxlh\" (UID: \"d3992789-6f8b-4806-8ce0-261a7623ca46\") " pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.216256 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.233728 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc9fm\" (UniqueName: \"kubernetes.io/projected/bab054c9-6c83-40ee-896d-6459b22a6b4b-kube-api-access-sc9fm\") pod \"apiserver-6cdf967d79-ffdf8\" (UID: \"bab054c9-6c83-40ee-896d-6459b22a6b4b\") " pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.242526 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dcvj\" (UniqueName: \"kubernetes.io/projected/530553aa-0a1d-423e-8a22-f5eb4bdbb883-kube-api-access-8dcvj\") pod \"openshift-config-operator-77658b5b66-dq5sc\" (UID: \"530553aa-0a1d-423e-8a22-f5eb4bdbb883\") " pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.266239 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-58g82"
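The util.go:30 messages are expected here: after a kubelet (re)start no pod has a live sandbox, so every scheduled pod gets a fresh one. A quick way to confirm the messages are spread across namespaces rather than looping on a single pod is to tally them; a hypothetical triage helper (not part of the kubelet) that reads a kubelet log on stdin:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches the namespace portion of: "No sandbox for pod can be found.
// Need to start a new one" pod="<namespace>/<name>"
var noSandbox = regexp.MustCompile(`No sandbox for pod can be found.*?pod="([^/"]+)/`)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet log lines can be long
	for sc.Scan() {
		if m := noSandbox.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]]++
		}
	}
	for ns, n := range counts {
		fmt.Printf("%-55s %d\n", ns, n)
	}
}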
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.274058 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.288897 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.307127 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"e1c81343f1802a9af989d03bd718418d51ff1658210877ea66e355cfa2e34a17"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.311227 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" event={"ID":"34a48baf-1bee-4921-8bb2-9b7320e76f79","Type":"ContainerStarted","Data":"221b49cdb410185bdeac4f730133dd0fa4ff8788eb088296b2159cfe37221ff5"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.315920 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" event={"ID":"297ab9b6-2186-4d5b-a952-2bfd59af63c4","Type":"ContainerStarted","Data":"500e938f4e9792db3accbfd784999f03985a335c967c54dc9c7aead5707b4c0c"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.315954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" event={"ID":"297ab9b6-2186-4d5b-a952-2bfd59af63c4","Type":"ContainerStarted","Data":"7bff2c2254ac982fcc69518770e1c5525216fdd47647a7b69903c3e74262b876"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.317155 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" event={"ID":"ed024e5d-8fc2-4c22-803d-73f3c9795f19","Type":"ContainerStarted","Data":"b153a63b71b0f03ce9577b73722e4fa65c373ae520488d2d9f69779495ec38d7"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.321339 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"fe9260e7daa40602dbdf7f0000bccf37bbec3c0ac3ffbcefab4b0807a35ee861"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.321365 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" event={"ID":"e9127708-ccfd-4891-8a3a-f0cacb77e0f4","Type":"ContainerStarted","Data":"83e6e23fd77cb12c7de7335bc2598e96aaf9427c9ab5496a43443aa9e549aca7"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.322015 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.322924 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" event={"ID":"4f8aa612-9da0-4a2b-911e-6a1764a4e74e","Type":"ContainerStarted","Data":"d203a0760c8c5e3ccf09ac979d205f5f791aded2fc930a7e73c4a0902ab839c7"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.322950 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" event={"ID":"4f8aa612-9da0-4a2b-911e-6a1764a4e74e","Type":"ContainerStarted","Data":"b98bc433b1f7b72f2426a9796dfbbdb4144328cf20880dc509e6ba6055a570b4"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.323665 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" event={"ID":"d0f40333-c860-4c04-8058-a0bf572dcf12","Type":"ContainerStarted","Data":"776069c24108ff97d3b7f3235dc69d3174817280d7e49da37c8451829427c064"}
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.324832 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.332263 3552 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.332319 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.340256 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc"
Mar 20 15:27:31 crc kubenswrapper[3552]: W0320 15:27:31.365266 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b5d722a_1123_4935_9740_52a08d018bc9.slice/crio-a67392208a15ed6ad38f880f70137de4bd94a07a47decc055a431e40dfe41cde WatchSource:0}: Error finding container a67392208a15ed6ad38f880f70137de4bd94a07a47decc055a431e40dfe41cde: Status 404 returned error can't find the container with id a67392208a15ed6ad38f880f70137de4bd94a07a47decc055a431e40dfe41cde
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.435669 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-r9fjc"
Mar 20 15:27:31 crc kubenswrapper[3552]: I0320 15:27:31.442381 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8"
Mar 20 15:27:32 crc kubenswrapper[3552]: W0320 15:27:32.059051 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1620f19_8aa3_45cf_931b_7ae0e5cd14cf.slice/crio-177e7866ec358dcafe9975d5cb0be485377ea4f8df931b04d61b0920caa61c57 WatchSource:0}: Error finding container 177e7866ec358dcafe9975d5cb0be485377ea4f8df931b04d61b0920caa61c57: Status 404 returned error can't find the container with id 177e7866ec358dcafe9975d5cb0be485377ea4f8df931b04d61b0920caa61c57
Mar 20 15:27:32 crc kubenswrapper[3552]: W0320 15:27:32.061147 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5025cb4_ddb0_4107_88c1_bcbcdb779ac0.slice/crio-b7c6337436d6a8628fd2071332df32946e683a37d59e3e7d65fb937c505d079e WatchSource:0}: Error finding container b7c6337436d6a8628fd2071332df32946e683a37d59e3e7d65fb937c505d079e: Status 404 returned error can't find the container with id b7c6337436d6a8628fd2071332df32946e683a37d59e3e7d65fb937c505d079e
Mar 20 15:27:32 crc kubenswrapper[3552]: W0320 15:27:32.062402 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebf09b15_4bb1_44bf_9d54_e76fad5cf76e.slice/crio-6a1e44bf46ff4506ed5934c37be10a10b33bbe76b84603f7987fdd9918207d8f WatchSource:0}: Error finding container 6a1e44bf46ff4506ed5934c37be10a10b33bbe76b84603f7987fdd9918207d8f: Status 404 returned error can't find the container with id 6a1e44bf46ff4506ed5934c37be10a10b33bbe76b84603f7987fdd9918207d8f
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.348368 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-6df6df6b6b-58shh" event={"ID":"297ab9b6-2186-4d5b-a952-2bfd59af63c4","Type":"ContainerStarted","Data":"f52e6dcd6cb07105f14ba5df8e91ead0a7e77a480b32a6d0861fa9cbf04ff174"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.351856 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerStarted","Data":"6209b77ca0764aa36ae3eefe413572f11ccc3796880b838454cdb21e3cae60ba"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.351877 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5d9b995f6b-fcgd7" event={"ID":"71af81a9-7d43-49b2-9287-c375900aa905","Type":"ContainerStarted","Data":"485e1e4ecb92a22420f424e72f49f6f1d1feaa3ad55f666dc5ec2423300270d9"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.365952 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-v54bt" event={"ID":"34a48baf-1bee-4921-8bb2-9b7320e76f79","Type":"ContainerStarted","Data":"6a615b43065e2fe8ee70e14bdc14730f61358bad2e7d87e06249312009c8ec0b"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.366090 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-v54bt"
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.367744 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" event={"ID":"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf","Type":"ContainerStarted","Data":"177e7866ec358dcafe9975d5cb0be485377ea4f8df931b04d61b0920caa61c57"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.378904 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" event={"ID":"a702c6d2-4dde-4077-ab8c-0f8df804bf7a","Type":"ContainerStarted","Data":"1a3695345bbf261e79037ac3825397787a7dbcff078e70a5e42c8addd5311963"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.378955 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" event={"ID":"a702c6d2-4dde-4077-ab8c-0f8df804bf7a","Type":"ContainerStarted","Data":"6778a6bab35664e060d3a0972f76a1674e5f6358a8df5e7ef4da8753d037865f"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.380229 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-78d54458c4-sc8h7" event={"ID":"ed024e5d-8fc2-4c22-803d-73f3c9795f19","Type":"ContainerStarted","Data":"ce07701b4c8a43bebe9b19ee32973604f6067031503f712c1f6ea50b0e0b921e"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.382849 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"fb415465a9a1ff924c4c807f00ce88dd80be6b5796e10869b670a7a0d29ed1dd"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.383623 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" event={"ID":"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0","Type":"ContainerStarted","Data":"b7c6337436d6a8628fd2071332df32946e683a37d59e3e7d65fb937c505d079e"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.385803 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5c5478f8c-vqvt7" event={"ID":"d0f40333-c860-4c04-8058-a0bf572dcf12","Type":"ContainerStarted","Data":"8649c4388be999ff84b3fb068407e6677e1782601f192d852877482608083113"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.392134 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-788b7c6b6c-ctdmb" event={"ID":"4f8aa612-9da0-4a2b-911e-6a1764a4e74e","Type":"ContainerStarted","Data":"f53fe8ce6581c2f5a36c344af5a9a9bf7bcc6f7e8c5411c09f0ad27b5730eab5"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.395059 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" event={"ID":"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e","Type":"ContainerStarted","Data":"6a1e44bf46ff4506ed5934c37be10a10b33bbe76b84603f7987fdd9918207d8f"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.398781 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" event={"ID":"0b5d722a-1123-4935-9740-52a08d018bc9","Type":"ContainerStarted","Data":"066202fd7806f7a8d7d6b89b848f240f8ddbf2edcf594a48e5756680b3fbd2f1"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.398824 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2vhcn" event={"ID":"0b5d722a-1123-4935-9740-52a08d018bc9","Type":"ContainerStarted","Data":"a67392208a15ed6ad38f880f70137de4bd94a07a47decc055a431e40dfe41cde"}
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.399796 3552 patch_prober.go:28] interesting pod/console-operator-5dbbc74dc9-cp5cd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body=
Mar 20 15:27:32 crc kubenswrapper[3552]: I0320 15:27:32.399865 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" podUID="e9127708-ccfd-4891-8a3a-f0cacb77e0f4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.62:8443/readyz\": dial tcp 10.217.0.62:8443: connect: connection refused"
Mar 20 15:27:32 crc kubenswrapper[3552]: W0320 15:27:32.532105 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45a8038e_e7f2_4d93_a6f5_7753aa54e63f.slice/crio-b382d64bec0434ac6bc01b6b953a5fcefd4844daefd898847b13cfd713a47cde WatchSource:0}: Error finding container b382d64bec0434ac6bc01b6b953a5fcefd4844daefd898847b13cfd713a47cde: Status 404 returned error can't find the container with id b382d64bec0434ac6bc01b6b953a5fcefd4844daefd898847b13cfd713a47cde
Mar 20 15:27:32 crc kubenswrapper[3552]: W0320 15:27:32.689942 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod120b38dc_8236_4fa6_a452_642b8ad738ee.slice/crio-349bed48468a0db7f3ef09347340facce68090485284aacae5cde6802b57057d WatchSource:0}: Error finding container 349bed48468a0db7f3ef09347340facce68090485284aacae5cde6802b57057d: Status 404 returned error can't find the container with id 349bed48468a0db7f3ef09347340facce68090485284aacae5cde6802b57057d
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.018884 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b5c38ff_1fa8_4219_994d_15776acd4a4d.slice/crio-590d6b8ae03a9d2182d26c36c212e04d6c498f3ad403851fc5510a87d76868dc WatchSource:0}: Error finding container 590d6b8ae03a9d2182d26c36c212e04d6c498f3ad403851fc5510a87d76868dc: Status 404 returned error can't find the container with id 590d6b8ae03a9d2182d26c36c212e04d6c498f3ad403851fc5510a87d76868dc
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.022911 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f4dca86_e6ee_4ec9_8324_86aff960225e.slice/crio-c03c1dff4b5b81dd53f5fb641d4143b6c8e4e926af644360cde46e77ef64c4e7 WatchSource:0}: Error finding container c03c1dff4b5b81dd53f5fb641d4143b6c8e4e926af644360cde46e77ef64c4e7: Status 404 returned error can't find the container with id c03c1dff4b5b81dd53f5fb641d4143b6c8e4e926af644360cde46e77ef64c4e7
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.040532 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd556935_a077_45df_ba3f_d42c39326ccd.slice/crio-a5a6d7b8c55def606529e50e02bd4d5437382c4320633157462adbee3cd23b00 WatchSource:0}: Error finding container a5a6d7b8c55def606529e50e02bd4d5437382c4320633157462adbee3cd23b00: Status 404 returned error can't find the container with id a5a6d7b8c55def606529e50e02bd4d5437382c4320633157462adbee3cd23b00
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.154063 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6639609b_906b_4193_883e_ed1160aa5d50.slice/crio-0449f8bfb3afa130dcccf6498518b220784f6f6e77f03a4a2ab5dad084499396 WatchSource:0}: Error finding container 0449f8bfb3afa130dcccf6498518b220784f6f6e77f03a4a2ab5dad084499396: Status 404 returned error can't find the container with id 0449f8bfb3afa130dcccf6498518b220784f6f6e77f03a4a2ab5dad084499396
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.156089 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5bacb25d_97b6_4491_8fb4_99feae1d802a.slice/crio-f206032a2dbd69f8aee263893a79f0766f1cb19d3717abf142a3936b62ae482a WatchSource:0}: Error finding container f206032a2dbd69f8aee263893a79f0766f1cb19d3717abf142a3936b62ae482a: Status 404 returned error can't find the container with id f206032a2dbd69f8aee263893a79f0766f1cb19d3717abf142a3936b62ae482a
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.162799 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a5ae51d_d173_4531_8975_f164c975ce1f.slice/crio-5179a9ca6ca219e18c69c7f331e64771bcfc36ee48e5424fb2ea48948eb4062b WatchSource:0}: Error finding container 5179a9ca6ca219e18c69c7f331e64771bcfc36ee48e5424fb2ea48948eb4062b: Status 404 returned error can't find the container with id 5179a9ca6ca219e18c69c7f331e64771bcfc36ee48e5424fb2ea48948eb4062b
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.165650 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd181ba2b_5c34_43f1_bd4d_ae4e3fb3f7c2.slice/crio-db3c19711be08ff7b1683007f3414da2a414b48030c1f0147b04e13f0e49775c WatchSource:0}: Error finding container db3c19711be08ff7b1683007f3414da2a414b48030c1f0147b04e13f0e49775c: Status 404 returned error can't find the container with id db3c19711be08ff7b1683007f3414da2a414b48030c1f0147b04e13f0e49775c
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.194510 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc782cf62_a827_4677_b3c2_6f82c5f09cbb.slice/crio-749c9bc0265111680d24335b66dc358ca6f7086afc779fe4992f8a1eb9ac2fbd WatchSource:0}: Error finding container 749c9bc0265111680d24335b66dc358ca6f7086afc779fe4992f8a1eb9ac2fbd: Status 404 returned error can't find the container with id 749c9bc0265111680d24335b66dc358ca6f7086afc779fe4992f8a1eb9ac2fbd
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.331341 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4092a9f8_5acc_4932_9e90_ef962eeb301a.slice/crio-8798f097f6508f33d2702cf0e2add82abdee68b2584853e0cabc76b4f67b7bfa WatchSource:0}: Error finding container 8798f097f6508f33d2702cf0e2add82abdee68b2584853e0cabc76b4f67b7bfa: Status 404 returned error can't find the container with id 8798f097f6508f33d2702cf0e2add82abdee68b2584853e0cabc76b4f67b7bfa
Mar 20 15:27:33 crc kubenswrapper[3552]: W0320 15:27:33.338008 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbab054c9_6c83_40ee_896d_6459b22a6b4b.slice/crio-82982675077d37438861ba5a67849008605f12d3273fd92296aea2eb490787ac WatchSource:0}: Error finding container 82982675077d37438861ba5a67849008605f12d3273fd92296aea2eb490787ac: Status 404 returned error can't find the container with id 82982675077d37438861ba5a67849008605f12d3273fd92296aea2eb490787ac
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.407362 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" event={"ID":"0b5c38ff-1fa8-4219-994d-15776acd4a4d","Type":"ContainerStarted","Data":"590d6b8ae03a9d2182d26c36c212e04d6c498f3ad403851fc5510a87d76868dc"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.408230 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerStarted","Data":"39bfbef6cd1e072d5e2d4fac60cd7568a2a645cc2d76bcee06770b06158b36d3"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.409095 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerStarted","Data":"8798f097f6508f33d2702cf0e2add82abdee68b2584853e0cabc76b4f67b7bfa"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.409793 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" event={"ID":"10603adc-d495-423c-9459-4caa405960bb","Type":"ContainerStarted","Data":"ba5de94db08c69ba8be29c6dbe4bdf977c83b2de99d907eb291aebe4e641d47b"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.410507 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" event={"ID":"59748b9b-c309-4712-aa85-bb38d71c4915","Type":"ContainerStarted","Data":"40e2e9dc981912e79e81f9c708a1c233b635ada1d8f8a98b5bab33acc4f7e36a"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.411286 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" event={"ID":"5bacb25d-97b6-4491-8fb4-99feae1d802a","Type":"ContainerStarted","Data":"f206032a2dbd69f8aee263893a79f0766f1cb19d3717abf142a3936b62ae482a"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.411954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" event={"ID":"cf1a8966-f594-490a-9fbb-eec5bafd13d3","Type":"ContainerStarted","Data":"5b18a0170051b74f845add4d40b4af0b3c15402875d04a63397c67cf6085eb38"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.412678 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"4c23e7ad6a617ab132380b3926b678cc9fc869711dab98112316a0633ba99941"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.413744 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" event={"ID":"43ae1c37-047b-4ee2-9fee-41e337dd4ac8","Type":"ContainerStarted","Data":"3e97d2cd19d2518b42df04db262c7b0ce1a247631b490eac60f27657f6347984"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.414604 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" event={"ID":"45a8038e-e7f2-4d93-a6f5-7753aa54e63f","Type":"ContainerStarted","Data":"b382d64bec0434ac6bc01b6b953a5fcefd4844daefd898847b13cfd713a47cde"}
Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.415376 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerStarted","Data":"c03c1dff4b5b81dd53f5fb641d4143b6c8e4e926af644360cde46e77ef64c4e7"}
pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerStarted","Data":"c03c1dff4b5b81dd53f5fb641d4143b6c8e4e926af644360cde46e77ef64c4e7"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.416240 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"749c9bc0265111680d24335b66dc358ca6f7086afc779fe4992f8a1eb9ac2fbd"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.417500 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-cd974775-4nsv5" event={"ID":"6639609b-906b-4193-883e-ed1160aa5d50","Type":"ContainerStarted","Data":"0449f8bfb3afa130dcccf6498518b220784f6f6e77f03a4a2ab5dad084499396"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.425420 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"5f65b13a6d16902c689815b393c59261c72d393b80fe14f1a4feb31ab5a8520d"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.439316 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" event={"ID":"0f394926-bdb9-425c-b36e-264d7fd34550","Type":"ContainerStarted","Data":"31fc8c54ba0524bb8732beef75d75e3dcb78dc6170b3b11d2aa67b135f2cbd7a"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.439352 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" event={"ID":"8a5ae51d-d173-4531-8975-f164c975ce1f","Type":"ContainerStarted","Data":"5179a9ca6ca219e18c69c7f331e64771bcfc36ee48e5424fb2ea48948eb4062b"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.439364 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"1f4826d7319f214a4789dcbc1857374f2e76e059486a861642fd049e18aa6609"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.439376 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gbw49" event={"ID":"13045510-8717-4a71-ade4-be95a76440a7","Type":"ContainerStarted","Data":"982a9a18708278bcd2e6ec7546bc9bd3a3320be86cdcb791fab22fdcecaf8a5a"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.439385 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" event={"ID":"b61ce6b0-a70f-42b7-9435-3d6acba81ccf","Type":"ContainerStarted","Data":"dd3a8c7d0296d77414e73a7150dacd168a4ab86ced202337c6c2308f58725023"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.439394 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" event={"ID":"b54e8941-2fc4-432a-9e51-39684df9089e","Type":"ContainerStarted","Data":"750a12c674a4defe15c84a96c104e0327ae00452ee60a1f58ecba5cb2c47bba7"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.440061 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" 
event={"ID":"c085412c-b875-46c9-ae3e-e6b0d8067091","Type":"ContainerStarted","Data":"04be78180370e9436076fdede7cc43be74b382ed7249c7cba6e57e68bb201fc2"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.441183 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" event={"ID":"d3992789-6f8b-4806-8ce0-261a7623ca46","Type":"ContainerStarted","Data":"7e8805597cf1d99e2d944f222c6341d57164cce7e1d8c4c4e39b12de34d80093"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.442071 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8568c59db8-fspjn" event={"ID":"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4","Type":"ContainerStarted","Data":"14cc749e239ff43ea4823b0c7de8c695afe34d2ed2edfb0f26666f2e5acbcaeb"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.442831 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" event={"ID":"6d67253e-2acd-4bc1-8185-793587da4f17","Type":"ContainerStarted","Data":"a5eaf518d3d52e22b43e1df5176b9ece95eb6cc0b577e18b50e0e804e7bd8700"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.444393 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qdfr4" event={"ID":"a702c6d2-4dde-4077-ab8c-0f8df804bf7a","Type":"ContainerStarted","Data":"d72ade436cfd71e08bc35392047e2e8c3130e6e33e0abf5c0ba712dff895b157"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.445515 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"349bed48468a0db7f3ef09347340facce68090485284aacae5cde6802b57057d"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.446325 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" event={"ID":"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be","Type":"ContainerStarted","Data":"2e517b320ff943fc5ae8d09124e401a62a37041c440c60dab9025235c6a32712"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.447538 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-6f6cb54958-rbddb" event={"ID":"c1620f19-8aa3-45cf-931b-7ae0e5cd14cf","Type":"ContainerStarted","Data":"dfa330e313ad125cc84d22502aa0b0b694fc2295c3ccaa8bc254eb9b0d94e6b9"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.448663 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"63c02329832da52f3fbb639aab68c7b7d4d969d6ee0b754d524bc582679a11da"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.451312 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7cc7ff75d5-g9qv8" event={"ID":"ebf09b15-4bb1-44bf-9d54-e76fad5cf76e","Type":"ContainerStarted","Data":"0b35108251823c652c721361bc50e28ebdb5f1b5ec0191f68d470712b4726410"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.452481 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" 
event={"ID":"bd556935-a077-45df-ba3f-d42c39326ccd","Type":"ContainerStarted","Data":"a5a6d7b8c55def606529e50e02bd4d5437382c4320633157462adbee3cd23b00"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.453812 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" event={"ID":"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0","Type":"ContainerStarted","Data":"39f9acbc47634867d1113acfd5ccdb389803ab302db960af29b0c674cbe57b37"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.454794 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" event={"ID":"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7","Type":"ContainerStarted","Data":"cb5e0d885686906030375d6654caf247163b802720e38159b7d8c2c9d977e13c"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.456334 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"03077dc47178df7e113e85e3a763e13e8e3da4f69de673ba1518f2474c567ac6"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.457086 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerStarted","Data":"82982675077d37438861ba5a67849008605f12d3273fd92296aea2eb490787ac"} Mar 20 15:27:33 crc kubenswrapper[3552]: I0320 15:27:33.458115 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" event={"ID":"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2","Type":"ContainerStarted","Data":"db3c19711be08ff7b1683007f3414da2a414b48030c1f0147b04e13f0e49775c"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.463047 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"158ebf6231252413c153fb422d2108fce0bd38d9a86e808152e4e6b790793ac7"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.463949 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.465574 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8568c59db8-fspjn" event={"ID":"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4","Type":"ContainerStarted","Data":"c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.466500 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.466565 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.468328 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator/migrator-f7c6d88df-q2fnv" event={"ID":"cf1a8966-f594-490a-9fbb-eec5bafd13d3","Type":"ContainerStarted","Data":"f4d26b6d9640a163efa142762a86940bab2919d1b95d084300d0928d657b14c2"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.469858 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-546b4f8984-pwccz" event={"ID":"6d67253e-2acd-4bc1-8185-793587da4f17","Type":"ContainerStarted","Data":"c1d65e3b3d39f5a428298b46b16dccbec9b8a9f6ade5df4920b790589db614d6"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.471571 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-649bd778b4-tt5tw" event={"ID":"45a8038e-e7f2-4d93-a6f5-7753aa54e63f","Type":"ContainerStarted","Data":"a3c0b9e3e25f581ce38f227bc47f6578b186266d84be74330b921576fa292517"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.473056 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" event={"ID":"59748b9b-c309-4712-aa85-bb38d71c4915","Type":"ContainerStarted","Data":"e98102abcecaa6617089b293277b1b0d9c4ea901730edb73179d2a2a8014fa0b"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.474733 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" event={"ID":"b61ce6b0-a70f-42b7-9435-3d6acba81ccf","Type":"ContainerStarted","Data":"dfc01ed2a07bbdbdac9335fd20b19ba0ef1e085eadfc606e2a2efb1a3cd8a214"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.477394 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-7769bd8d7d-q5cvv" event={"ID":"b54e8941-2fc4-432a-9e51-39684df9089e","Type":"ContainerStarted","Data":"74734bdf92ea3155c53a6ff0036845942276c318896122f160cd5276242355fc"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.480973 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-686c6c748c-qbnnr" event={"ID":"9ae0dfbb-a0a9-45bb-85b5-cd9f94f64fe7","Type":"ContainerStarted","Data":"27e3d393d777be640af1cb62aedffcfd3a5e3aed1821d5672a7796e5c7959d11"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.482554 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gbw49" event={"ID":"13045510-8717-4a71-ade4-be95a76440a7","Type":"ContainerStarted","Data":"8b634c2b40bec7645b2f9dbabf2f6b9d7c3df0f27dbc06cb2cfbc16617c496c3"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.484948 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"e8e0683f0c072831c39c1a2245ce16debfa139e6fcc17458b43f8c1b07c37acb"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.487570 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" event={"ID":"c085412c-b875-46c9-ae3e-e6b0d8067091","Type":"ContainerStarted","Data":"372f0c7ae3a44af63861fc6f212e78432692a137ba456ec9086ebadec2cb0b53"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.489604 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" 
event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerStarted","Data":"e35516117959c0657c61f75c60ac454960dcc82cd799256400999ce94780240a"} Mar 20 15:27:34 crc kubenswrapper[3552]: I0320 15:27:34.497730 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"c2add79b2751a95f6c7ec1a5492a0a8b06d2cf5c4a5424b5f0c8b4d52bd4500c"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.504007 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" event={"ID":"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be","Type":"ContainerStarted","Data":"c93240fa61e6fc1b2a742e92ffa81468ee0359521cd8edcef4809ec5387249c1"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.506069 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-6c7c885997-4hbbc" event={"ID":"d5025cb4-ddb0-4107-88c1-bcbcdb779ac0","Type":"ContainerStarted","Data":"2a588c99bd085298a495810cf4f82f5f22db610e4da55f2c74094d39eb320fd6"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.510528 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"574244fb16c86fc27409b71d67c1f0aeef5067e458959f85fa45ea92b8bbe893"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.513430 3552 generic.go:334] "Generic (PLEG): container finished" podID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerID="c254c8b2c434ccd5fc3ef2ac73790220547d3b1e71055271f5f863bcf6a77e81" exitCode=0 Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.513873 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerDied","Data":"c254c8b2c434ccd5fc3ef2ac73790220547d3b1e71055271f5f863bcf6a77e81"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.515637 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-7c88c4c865-kn67m" event={"ID":"43ae1c37-047b-4ee2-9fee-41e337dd4ac8","Type":"ContainerStarted","Data":"d00545115689436953193e94d5642d7e6c14b5dc070e9817ac335e37ad47a4d7"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.523917 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-76788bff89-wkjgm" event={"ID":"120b38dc-8236-4fa6-a452-642b8ad738ee","Type":"ContainerStarted","Data":"c752b6867ae03a1872800649345a6a9205b615d777f9be5bd3027490851f106a"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.524697 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.534809 3552 generic.go:334] "Generic (PLEG): container finished" podID="5bacb25d-97b6-4491-8fb4-99feae1d802a" containerID="01618e7c3d1a1d01954a25b626b98a25bbcf5d026ba621fb289f9693a05c3767" exitCode=0 Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.534925 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" 
event={"ID":"5bacb25d-97b6-4491-8fb4-99feae1d802a","Type":"ContainerDied","Data":"01618e7c3d1a1d01954a25b626b98a25bbcf5d026ba621fb289f9693a05c3767"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.537571 3552 generic.go:334] "Generic (PLEG): container finished" podID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerID="757f8272c42551695b53a563f965627a08838529003952870cd8e43497fd8ea0" exitCode=0 Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.537664 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerDied","Data":"757f8272c42551695b53a563f965627a08838529003952870cd8e43497fd8ea0"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.544721 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"01cb00638ae6b7144230e8bae9bb132a2a2b814a05cf0fea434944013284ebe1"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.548369 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-768d5b5d86-722mg" event={"ID":"0b5c38ff-1fa8-4219-994d-15776acd4a4d","Type":"ContainerStarted","Data":"423b9e1a667048bdab0a65522317a2e7cf00d422530da08523d59e704a8c12fb"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.549976 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" event={"ID":"d3992789-6f8b-4806-8ce0-261a7623ca46","Type":"ContainerStarted","Data":"2e09ae0711659fbc5277afe25948c9d0ec383c7d14ccc151d1326589d3b5a465"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.552141 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" event={"ID":"bd556935-a077-45df-ba3f-d42c39326ccd","Type":"ContainerStarted","Data":"246f34396f40967958890cfaef510b3bf638af2592f8ed3b3ad9b3fdaf9c1f91"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.552352 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.555082 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-7978d7d7f6-2nt8z" event={"ID":"0f394926-bdb9-425c-b36e-264d7fd34550","Type":"ContainerStarted","Data":"87f6356cc6b32edeaf8dc51403e055eb9332fa9739c44c6923cd6291d918eda4"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.557461 3552 generic.go:334] "Generic (PLEG): container finished" podID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerID="1f6624c4aa1b883723037d797a94571511aa13ccc973dae56696329e0a7b69d2" exitCode=0 Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.557519 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerDied","Data":"1f6624c4aa1b883723037d797a94571511aa13ccc973dae56696329e0a7b69d2"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.559306 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" 
event={"ID":"d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2","Type":"ContainerStarted","Data":"8825de27bc336a5e0e1fa99271fbbf513adb8c8b8e276ecab6a0663991b54817"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.569970 3552 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.570062 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.579830 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" event={"ID":"8a5ae51d-d173-4531-8975-f164c975ce1f","Type":"ContainerStarted","Data":"4be34350b6d9cf6125f2b47dba5a31d5c4ec51983486c723fc925136145bcec6"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.580248 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.593629 3552 generic.go:334] "Generic (PLEG): container finished" podID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerID="2be3b68f1cfd5334102db663408c9e56a62f2cbc20ee8c4eefcd01d7a9f0d52c" exitCode=0 Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.594064 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerDied","Data":"2be3b68f1cfd5334102db663408c9e56a62f2cbc20ee8c4eefcd01d7a9f0d52c"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.599132 3552 generic.go:334] "Generic (PLEG): container finished" podID="bab054c9-6c83-40ee-896d-6459b22a6b4b" containerID="22a37da645022cda2a23b492d5216b224b23f84accfe4d671ce53249244cb9ae" exitCode=0 Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.599190 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerDied","Data":"22a37da645022cda2a23b492d5216b224b23f84accfe4d671ce53249244cb9ae"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.600086 3552 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.600115 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.606979 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gbw49" 
event={"ID":"13045510-8717-4a71-ade4-be95a76440a7","Type":"ContainerStarted","Data":"c4000b5aec98622f1e087e297063eae4199c6402648aaadd1a3b1bde20cd6cf3"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.609585 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-cd974775-4nsv5" event={"ID":"6639609b-906b-4193-883e-ed1160aa5d50","Type":"ContainerStarted","Data":"d1894a855f3562b6e3088d88357bb3bb4c91357e34c5cd4ac46b1c49bae3d9d7"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.628521 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" event={"ID":"10603adc-d495-423c-9459-4caa405960bb","Type":"ContainerStarted","Data":"ea26156e817aee13b3ad57470e650225b859c3304e29a1f60d7d744959900b4b"} Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.629165 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.629217 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.632108 3552 patch_prober.go:28] interesting pod/olm-operator-6d8474f75f-x54mh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.632163 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.632222 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.632307 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.640976 3552 patch_prober.go:28] interesting pod/oauth-openshift-6499cf79cf-qdfbh container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" start-of-body= Mar 20 15:27:35 crc kubenswrapper[3552]: I0320 15:27:35.641044 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.631521 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerStarted","Data":"9e14643f007c63b14790fb91b8548a7d2e7141f48930710ca9a5019f2c8a80af"} Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.633006 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-75f687757b-nz2xb" event={"ID":"10603adc-d495-423c-9459-4caa405960bb","Type":"ContainerStarted","Data":"e390f0f8dff62f362cd5c9bf991409b99a75a6fbe11f2b33c1fdc8f5e65f29b0"} Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.639545 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" event={"ID":"63eb7413-02c3-4d6e-bb48-e5ffe5ce15be","Type":"ContainerStarted","Data":"d535173f736e0b361d535ff17b0be00979d39a5210c006f3408d4fa46e9fb9f6"} Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.639579 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640556 3552 patch_prober.go:28] interesting pod/oauth-openshift-6499cf79cf-qdfbh container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" start-of-body= Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640605 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" podUID="b61ce6b0-a70f-42b7-9435-3d6acba81ccf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.72:6443/healthz\": dial tcp 10.217.0.72:6443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640611 3552 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640638 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640668 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640941 3552 patch_prober.go:28] interesting pod/route-controller-manager-6f75dd68cc-gcdzx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.87:8443/healthz\": dial tcp 10.217.0.87:8443: connect: connection refused" start-of-body= Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.640974 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" containerName="route-controller-manager" probeResult="failure" output="Get 
\"https://10.217.0.87:8443/healthz\": dial tcp 10.217.0.87:8443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.641064 3552 patch_prober.go:28] interesting pod/olm-operator-6d8474f75f-x54mh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.641179 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" podUID="c085412c-b875-46c9-ae3e-e6b0d8067091" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.641384 3552 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.641442 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.642046 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.642470 3552 patch_prober.go:28] interesting pod/controller-manager-7fdc5fd4dd-zdxlh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" start-of-body= Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.642497 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" Mar 20 15:27:36 crc kubenswrapper[3552]: I0320 15:27:36.707688 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.644570 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" event={"ID":"5bacb25d-97b6-4491-8fb4-99feae1d802a","Type":"ContainerStarted","Data":"f521d6f90894b18bd5747cdea9062cca5d50712aed14b63f78a8f1dce6f4b1cd"} Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.646797 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-bc474d5d6-wshwg" event={"ID":"f728c15e-d8de-4a9a-a3ea-fdcead95cb91","Type":"ContainerStarted","Data":"4df5905442bd80bfa4d35f35714e8d9f61af095ed7d3f6c89fc45f853f8fad49"} Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.650306 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" event={"ID":"bab054c9-6c83-40ee-896d-6459b22a6b4b","Type":"ContainerStarted","Data":"335739ad1839e8b59f30ecb134ad28d5d1ef8280f1b887e38e5f2fcf4304fea2"} Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.652012 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"d617c04c5f4cfe84b884613fe829578314ef242b5b0cb8d7ff3f546aa607cf3f"} Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.653338 3552 generic.go:334] "Generic (PLEG): container finished" podID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerID="01cb00638ae6b7144230e8bae9bb132a2a2b814a05cf0fea434944013284ebe1" exitCode=0 Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.654659 3552 patch_prober.go:28] interesting pod/route-controller-manager-6f75dd68cc-gcdzx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.87:8443/healthz\": dial tcp 10.217.0.87:8443: connect: connection refused" start-of-body= Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.654705 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" podUID="d181ba2b-5c34-43f1-bd4d-ae4e3fb3f7c2" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.87:8443/healthz\": dial tcp 10.217.0.87:8443: connect: connection refused" Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.654921 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerDied","Data":"01cb00638ae6b7144230e8bae9bb132a2a2b814a05cf0fea434944013284ebe1"} Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.655287 3552 patch_prober.go:28] interesting pod/controller-manager-7fdc5fd4dd-zdxlh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" start-of-body= Mar 20 15:27:37 crc kubenswrapper[3552]: I0320 15:27:37.655317 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" Mar 20 15:27:38 crc kubenswrapper[3552]: I0320 15:27:38.660088 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" event={"ID":"530553aa-0a1d-423e-8a22-f5eb4bdbb883","Type":"ContainerStarted","Data":"040802e9cd068519fee3fffed4e8a43dae4e00a9b27eddf16030e08cc3b112b9"} Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.341453 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.524717 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-5dbbc74dc9-cp5cd" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.672410 3552 kubelet.go:2533] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.685202 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.714119 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.714177 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.714191 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.714254 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.844170 3552 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.844240 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.844235 3552 patch_prober.go:28] interesting pod/packageserver-8464bcc55b-sjnqz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.844283 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" podUID="bd556935-a077-45df-ba3f-d42c39326ccd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.887069 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6d8474f75f-x54mh" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.892672 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.901373 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-conversion-webhook-595f9969b-l6z49" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.964992 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.965040 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.970974 3552 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.971057 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.971194 3552 patch_prober.go:28] interesting pod/apiserver-69c565c9b6-vbdpd container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.971194 3552 patch_prober.go:28] interesting pod/catalog-operator-857456c46-7f5wf container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.971223 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" podUID="5bacb25d-97b6-4491-8fb4-99feae1d802a" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" Mar 20 15:27:40 crc kubenswrapper[3552]: I0320 15:27:40.971249 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" podUID="8a5ae51d-d173-4531-8975-f164c975ce1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.162185 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.162548 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.170170 3552 patch_prober.go:28] interesting pod/console-8568c59db8-fspjn container/console namespace/openshift-console: Startup probe status=failure output="Get 
\"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.170285 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.60:8443/health\": dial tcp 10.217.0.60:8443: connect: connection refused" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.277030 3552 patch_prober.go:28] interesting pod/controller-manager-7fdc5fd4dd-zdxlh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" start-of-body= Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.277146 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" podUID="d3992789-6f8b-4806-8ce0-261a7623ca46" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.76:8443/healthz\": dial tcp 10.217.0.76:8443: connect: connection refused" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.286010 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6f75dd68cc-gcdzx" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.310041 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6499cf79cf-qdfbh" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.443294 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.447230 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.458638 3552 patch_prober.go:28] interesting pod/apiserver-6cdf967d79-ffdf8 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.73:8443/healthz\": dial tcp 10.217.0.73:8443: connect: connection refused" start-of-body= Mar 20 15:27:41 crc kubenswrapper[3552]: I0320 15:27:41.458749 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.73:8443/healthz\": dial tcp 10.217.0.73:8443: connect: connection refused" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.223541 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wdqxz"] Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.223653 3552 topology_manager.go:215] "Topology Admit Handler" podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" podNamespace="openshift-marketplace" podName="community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: E0320 15:27:42.223831 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23dd33fe-d710-4638-9e0e-72cb27cb3e84" containerName="collect-profiles" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.223843 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="23dd33fe-d710-4638-9e0e-72cb27cb3e84" 
containerName="collect-profiles" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.223975 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="23dd33fe-d710-4638-9e0e-72cb27cb3e84" containerName="collect-profiles" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.224762 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.227689 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wdqxz"] Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.323907 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-utilities\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.324329 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45jwc\" (UniqueName: \"kubernetes.io/projected/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-kube-api-access-45jwc\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.324357 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-catalog-content\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.396142 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cpfdl"] Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.396237 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" podNamespace="openshift-marketplace" podName="certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.399680 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.414901 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpfdl"] Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.425418 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-45jwc\" (UniqueName: \"kubernetes.io/projected/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-kube-api-access-45jwc\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.425482 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-catalog-content\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.425571 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-utilities\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.425974 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-utilities\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.426707 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-catalog-content\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.451702 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-45jwc\" (UniqueName: \"kubernetes.io/projected/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-kube-api-access-45jwc\") pod \"community-operators-wdqxz\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.527025 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-catalog-content\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.527136 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-utilities\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.527186 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdtnw\" (UniqueName: \"kubernetes.io/projected/b76f3002-ca0c-473a-a5ec-e66774cab758-kube-api-access-kdtnw\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.586698 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.627728 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kdtnw\" (UniqueName: \"kubernetes.io/projected/b76f3002-ca0c-473a-a5ec-e66774cab758-kube-api-access-kdtnw\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.627856 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-catalog-content\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.627905 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-utilities\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.628365 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-catalog-content\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.628380 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-utilities\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.644288 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdtnw\" (UniqueName: \"kubernetes.io/projected/b76f3002-ca0c-473a-a5ec-e66774cab758-kube-api-access-kdtnw\") pod \"certified-operators-cpfdl\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.678697 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"914ff5645dc19b60dd54272e3e422f7775b886cc2e93d7cb7b55df0ed619ba94"} Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.761678 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.778455 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:27:42 crc kubenswrapper[3552]: I0320 15:27:42.778566 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.201042 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-m447z"] Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.201390 3552 topology_manager.go:215] "Topology Admit Handler" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" podNamespace="openshift-marketplace" podName="redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.202633 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.211630 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m447z"] Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.236331 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-utilities\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.236578 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-catalog-content\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.236706 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng9bg\" (UniqueName: \"kubernetes.io/projected/df2e1272-092c-448f-80f0-bcdf036b7090-kube-api-access-ng9bg\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.337759 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-utilities\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.337807 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-catalog-content\") pod \"redhat-marketplace-m447z\" (UID: 
\"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.337845 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ng9bg\" (UniqueName: \"kubernetes.io/projected/df2e1272-092c-448f-80f0-bcdf036b7090-kube-api-access-ng9bg\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.339158 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-utilities\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.339519 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-catalog-content\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.343222 3552 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.343293 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.343236 3552 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.343377 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.355758 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng9bg\" (UniqueName: \"kubernetes.io/projected/df2e1272-092c-448f-80f0-bcdf036b7090-kube-api-access-ng9bg\") pod \"redhat-marketplace-m447z\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.513025 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpfdl"] Mar 20 15:27:43 crc kubenswrapper[3552]: W0320 15:27:43.520822 3552 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb76f3002_ca0c_473a_a5ec_e66774cab758.slice/crio-9306ad93b404df63d909985f44b00dd0de0283e49eda0bd793536cf44bafb9d1 WatchSource:0}: Error finding container 9306ad93b404df63d909985f44b00dd0de0283e49eda0bd793536cf44bafb9d1: Status 404 returned error can't find the container with id 9306ad93b404df63d909985f44b00dd0de0283e49eda0bd793536cf44bafb9d1 Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.522509 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.685074 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpfdl" event={"ID":"b76f3002-ca0c-473a-a5ec-e66774cab758","Type":"ContainerStarted","Data":"9306ad93b404df63d909985f44b00dd0de0283e49eda0bd793536cf44bafb9d1"} Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.695158 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wdqxz"] Mar 20 15:27:43 crc kubenswrapper[3552]: I0320 15:27:43.720481 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m447z"] Mar 20 15:27:43 crc kubenswrapper[3552]: W0320 15:27:43.898214 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf2e1272_092c_448f_80f0_bcdf036b7090.slice/crio-8e52d85efd282a1f01ac46f2ce97f045143292b57b4b2693c5899755c8a477ec WatchSource:0}: Error finding container 8e52d85efd282a1f01ac46f2ce97f045143292b57b4b2693c5899755c8a477ec: Status 404 returned error can't find the container with id 8e52d85efd282a1f01ac46f2ce97f045143292b57b4b2693c5899755c8a477ec Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.203766 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gwfln"] Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.204310 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" podNamespace="openshift-marketplace" podName="redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.206399 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.221788 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gwfln"] Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.257810 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-utilities\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.257862 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-catalog-content\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.257883 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlg5m\" (UniqueName: \"kubernetes.io/projected/2be257e1-1750-4219-a32a-ad9c63b419d5-kube-api-access-mlg5m\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.359010 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-utilities\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.359078 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-catalog-content\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.359109 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mlg5m\" (UniqueName: \"kubernetes.io/projected/2be257e1-1750-4219-a32a-ad9c63b419d5-kube-api-access-mlg5m\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.519975 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-catalog-content\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.520620 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-utilities\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.526983 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mlg5m\" (UniqueName: \"kubernetes.io/projected/2be257e1-1750-4219-a32a-ad9c63b419d5-kube-api-access-mlg5m\") pod \"redhat-operators-gwfln\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.571168 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.693657 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wdqxz" event={"ID":"00c35ca6-08f9-4b39-8c2d-8aa044fd5935","Type":"ContainerStarted","Data":"a29d0045b708366a9a0df99faeade8804461a2cb2f5e60f6b999fbc27bc91e27"} Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.695108 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerStarted","Data":"8e52d85efd282a1f01ac46f2ce97f045143292b57b4b2693c5899755c8a477ec"} Mar 20 15:27:44 crc kubenswrapper[3552]: I0320 15:27:44.772147 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gwfln"] Mar 20 15:27:45 crc kubenswrapper[3552]: I0320 15:27:45.701455 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwfln" event={"ID":"2be257e1-1750-4219-a32a-ad9c63b419d5","Type":"ContainerStarted","Data":"762cfb48f5bf278d82a396c427671c952f5895a7241d3dd4763607d649166fb9"} Mar 20 15:27:45 crc kubenswrapper[3552]: I0320 15:27:45.707975 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-gbw49" Mar 20 15:27:45 crc kubenswrapper[3552]: I0320 15:27:45.973000 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:45 crc kubenswrapper[3552]: I0320 15:27:45.980254 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-69c565c9b6-vbdpd" Mar 20 15:27:46 crc kubenswrapper[3552]: I0320 15:27:46.656443 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" Mar 20 15:27:46 crc kubenswrapper[3552]: I0320 15:27:46.732114 3552 generic.go:334] "Generic (PLEG): container finished" podID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerID="388a47a9f20c6834736217548c0c10f2b01c1cd1df6bb793860a3f6d859bc257" exitCode=0 Mar 20 15:27:46 crc kubenswrapper[3552]: I0320 15:27:46.732180 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpfdl" event={"ID":"b76f3002-ca0c-473a-a5ec-e66774cab758","Type":"ContainerDied","Data":"388a47a9f20c6834736217548c0c10f2b01c1cd1df6bb793860a3f6d859bc257"} Mar 20 15:27:46 crc kubenswrapper[3552]: I0320 15:27:46.736801 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wdqxz" event={"ID":"00c35ca6-08f9-4b39-8c2d-8aa044fd5935","Type":"ContainerStarted","Data":"02908bcde9587d5ab036e00ce05e06862119a97bfd45aa1db1ab1f36c52d7578"} Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.619488 3552 patch_prober.go:28] interesting pod/apiserver-6cdf967d79-ffdf8 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" 
start-of-body=[+]ping ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]log ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]etcd ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/generic-apiserver-start-informers ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/max-in-flight-filter ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/storage-object-count-tracker-hook ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/image.openshift.io-apiserver-caches ok Mar 20 15:27:47 crc kubenswrapper[3552]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Mar 20 15:27:47 crc kubenswrapper[3552]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/project.openshift.io-projectcache ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/openshift.io-startinformers ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/openshift.io-restmapperupdater ok Mar 20 15:27:47 crc kubenswrapper[3552]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Mar 20 15:27:47 crc kubenswrapper[3552]: healthz check failed Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.619573 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" podUID="bab054c9-6c83-40ee-896d-6459b22a6b4b" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.748161 3552 generic.go:334] "Generic (PLEG): container finished" podID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerID="02908bcde9587d5ab036e00ce05e06862119a97bfd45aa1db1ab1f36c52d7578" exitCode=0 Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.748215 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wdqxz" event={"ID":"00c35ca6-08f9-4b39-8c2d-8aa044fd5935","Type":"ContainerDied","Data":"02908bcde9587d5ab036e00ce05e06862119a97bfd45aa1db1ab1f36c52d7578"} Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.752979 3552 generic.go:334] "Generic (PLEG): container finished" podID="df2e1272-092c-448f-80f0-bcdf036b7090" containerID="f96cdaa1843c7a2ba988d93c1a00c0c601b7609c00baae0a99fa5a16fe3e02a5" exitCode=0 Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.753141 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerDied","Data":"f96cdaa1843c7a2ba988d93c1a00c0c601b7609c00baae0a99fa5a16fe3e02a5"} Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.757769 3552 generic.go:334] "Generic (PLEG): container finished" podID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerID="5be9bd9063932ea78f92060d56c558a67d02ed36e501bd4c316f6a1fff6fb87a" exitCode=0 Mar 20 15:27:47 crc kubenswrapper[3552]: I0320 15:27:47.757842 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwfln" event={"ID":"2be257e1-1750-4219-a32a-ad9c63b419d5","Type":"ContainerDied","Data":"5be9bd9063932ea78f92060d56c558a67d02ed36e501bd4c316f6a1fff6fb87a"} Mar 20 15:27:48 crc kubenswrapper[3552]: I0320 15:27:48.776537 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" 
event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"13e1e55fb76ce4f5e4509ddf3c438387c423cffc520e50a7e8a7e76e1fca7e06"} Mar 20 15:27:49 crc kubenswrapper[3552]: I0320 15:27:49.376003 3552 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Mar 20 15:27:49 crc kubenswrapper[3552]: I0320 15:27:49.498619 3552 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-03-20T15:27:49.376300541Z","Handler":null,"Name":""} Mar 20 15:27:49 crc kubenswrapper[3552]: I0320 15:27:49.505280 3552 csi_plugin.go:99] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Mar 20 15:27:49 crc kubenswrapper[3552]: I0320 15:27:49.505315 3552 csi_plugin.go:112] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Mar 20 15:27:50 crc kubenswrapper[3552]: I0320 15:27:50.714706 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:27:50 crc kubenswrapper[3552]: I0320 15:27:50.714795 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:27:50 crc kubenswrapper[3552]: I0320 15:27:50.714814 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:27:50 crc kubenswrapper[3552]: I0320 15:27:50.714875 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:27:50 crc kubenswrapper[3552]: I0320 15:27:50.848361 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-8464bcc55b-sjnqz" Mar 20 15:27:50 crc kubenswrapper[3552]: I0320 15:27:50.973461 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-857456c46-7f5wf" Mar 20 15:27:51 crc kubenswrapper[3552]: I0320 15:27:51.165793 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:51 crc kubenswrapper[3552]: I0320 15:27:51.169369 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:27:51 crc kubenswrapper[3552]: I0320 15:27:51.378248 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-7fdc5fd4dd-zdxlh" Mar 20 15:27:51 crc kubenswrapper[3552]: I0320 15:27:51.457032 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:51 crc kubenswrapper[3552]: I0320 15:27:51.473800 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-6cdf967d79-ffdf8" Mar 20 15:27:57 crc kubenswrapper[3552]: I0320 15:27:57.829532 3552 generic.go:334] "Generic (PLEG): container finished" podID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerID="d617c04c5f4cfe84b884613fe829578314ef242b5b0cb8d7ff3f546aa607cf3f" exitCode=0 Mar 20 15:27:57 crc kubenswrapper[3552]: I0320 15:27:57.829637 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerDied","Data":"d617c04c5f4cfe84b884613fe829578314ef242b5b0cb8d7ff3f546aa607cf3f"} Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.714254 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.714590 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.714627 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.715961 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"158ebf6231252413c153fb422d2108fce0bd38d9a86e808152e4e6b790793ac7"} pod="openshift-console/downloads-65476884b9-9wcvx" containerMessage="Container download-server failed liveness probe, will be restarted" Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.716018 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" containerID="cri-o://158ebf6231252413c153fb422d2108fce0bd38d9a86e808152e4e6b790793ac7" gracePeriod=2 Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.716145 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.716177 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.716481 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server 
namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:00 crc kubenswrapper[3552]: I0320 15:28:00.716562 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.268478 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.268547 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.268592 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.268620 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.268659 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.848533 3552 generic.go:334] "Generic (PLEG): container finished" podID="6268b7fe-8910-4505-b404-6f1df638105c" containerID="158ebf6231252413c153fb422d2108fce0bd38d9a86e808152e4e6b790793ac7" exitCode=0 Mar 20 15:28:01 crc kubenswrapper[3552]: I0320 15:28:01.848582 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerDied","Data":"158ebf6231252413c153fb422d2108fce0bd38d9a86e808152e4e6b790793ac7"} Mar 20 15:28:10 crc kubenswrapper[3552]: I0320 15:28:10.622469 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-v54bt" Mar 20 15:28:10 crc kubenswrapper[3552]: I0320 15:28:10.714140 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:10 crc kubenswrapper[3552]: I0320 15:28:10.714339 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:10 crc kubenswrapper[3552]: I0320 15:28:10.897962 3552 generic.go:334] "Generic (PLEG): container finished" podID="aa90b3c2-febd-4588-a063-7fbbe82f00c1" containerID="f072010141e0432a82d0cd5bce4ef78ec3ed40c5f8bb481a2055e25005db596d" exitCode=0 Mar 20 15:28:10 crc kubenswrapper[3552]: I0320 15:28:10.898008 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerDied","Data":"f072010141e0432a82d0cd5bce4ef78ec3ed40c5f8bb481a2055e25005db596d"} Mar 20 15:28:10 crc 
kubenswrapper[3552]: I0320 15:28:10.996896 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-84d578d794-jw7r2" Mar 20 15:28:12 crc kubenswrapper[3552]: I0320 15:28:12.778354 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:28:12 crc kubenswrapper[3552]: I0320 15:28:12.778577 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:28:20 crc kubenswrapper[3552]: I0320 15:28:20.714110 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:20 crc kubenswrapper[3552]: I0320 15:28:20.714723 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.987423 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerStarted","Data":"71a64d97722562a28eb102fd15aa034e65b73d80baa70b5c41248bfde3f71a4b"} Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.989705 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"ea4457836d47d189e1810c352d74c1ad213d2572c9f459d56103e7b3231fe826"} Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.991590 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-65476884b9-9wcvx" event={"ID":"6268b7fe-8910-4505-b404-6f1df638105c","Type":"ContainerStarted","Data":"9f8d5ec455b325d1337e3e726655d95f83af59e36b22d2619a578dcb3d949539"} Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.992018 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.992247 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.992324 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 
15:28:27.993891 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" event={"ID":"aa90b3c2-febd-4588-a063-7fbbe82f00c1","Type":"ContainerStarted","Data":"10ce291a964eb87c91dc0bb356ed5d0a76e169c0552777ffcdd4de7bcee893c0"} Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.995806 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerStarted","Data":"e77eafab78f1a15d5605fa3bd75a5093d1f6c9b8001b4168ef7d3752b52ad43c"} Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.997496 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerStarted","Data":"9acb7495105f93f492f6322f6afff3ccc17fbcb145e5aa1d828542efe7aea273"} Mar 20 15:28:27 crc kubenswrapper[3552]: I0320 15:28:27.999731 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpfdl" event={"ID":"b76f3002-ca0c-473a-a5ec-e66774cab758","Type":"ContainerStarted","Data":"f052b221e6b942a4ce3ca804e694168debcc4ebdf3fe8e6ca06a1be1d29c6068"} Mar 20 15:28:28 crc kubenswrapper[3552]: I0320 15:28:28.001834 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wdqxz" event={"ID":"00c35ca6-08f9-4b39-8c2d-8aa044fd5935","Type":"ContainerStarted","Data":"c09d9535642f668280cf8be637e16c3f1f9f19f0f257bd3562b922fdfd4b36da"} Mar 20 15:28:28 crc kubenswrapper[3552]: I0320 15:28:28.004153 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerStarted","Data":"8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e"} Mar 20 15:28:28 crc kubenswrapper[3552]: I0320 15:28:28.006192 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwfln" event={"ID":"2be257e1-1750-4219-a32a-ad9c63b419d5","Type":"ContainerStarted","Data":"6a5625ab56f0de99ec377d453a41b736b8ed5b7e565ae0cb2bc440171981cce1"} Mar 20 15:28:28 crc kubenswrapper[3552]: I0320 15:28:28.527323 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:28:28 crc kubenswrapper[3552]: I0320 15:28:28.530519 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:28:29 crc kubenswrapper[3552]: I0320 15:28:29.011936 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerStarted","Data":"c38b422941aa22592610a0f47c436846eb1fda56485cb0fb70554db44e2a1903"} Mar 20 15:28:29 crc kubenswrapper[3552]: I0320 15:28:29.014550 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hvm8g" event={"ID":"12e733dd-0939-4f1b-9cbb-13897e093787","Type":"ContainerStarted","Data":"f07cddf8032f0be96dd319030552eb729db5ea2d204bf1a0e35dc36ee2a6b227"} Mar 20 15:28:29 crc kubenswrapper[3552]: I0320 15:28:29.014743 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:28:29 crc kubenswrapper[3552]: I0320 15:28:29.015521 3552 patch_prober.go:28] interesting 
pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:29 crc kubenswrapper[3552]: I0320 15:28:29.015565 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:29 crc kubenswrapper[3552]: I0320 15:28:29.017898 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5c9bf7bc58-6jctv" Mar 20 15:28:30 crc kubenswrapper[3552]: I0320 15:28:30.021972 3552 generic.go:334] "Generic (PLEG): container finished" podID="df2e1272-092c-448f-80f0-bcdf036b7090" containerID="71a64d97722562a28eb102fd15aa034e65b73d80baa70b5c41248bfde3f71a4b" exitCode=0 Mar 20 15:28:30 crc kubenswrapper[3552]: I0320 15:28:30.022034 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerDied","Data":"71a64d97722562a28eb102fd15aa034e65b73d80baa70b5c41248bfde3f71a4b"} Mar 20 15:28:30 crc kubenswrapper[3552]: I0320 15:28:30.714785 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:30 crc kubenswrapper[3552]: I0320 15:28:30.714877 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:30 crc kubenswrapper[3552]: I0320 15:28:30.714912 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:30 crc kubenswrapper[3552]: I0320 15:28:30.714998 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:31 crc kubenswrapper[3552]: I0320 15:28:31.001324 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:28:31 crc kubenswrapper[3552]: I0320 15:28:31.001370 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:28:31 crc kubenswrapper[3552]: I0320 15:28:31.635313 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:28:32 crc kubenswrapper[3552]: I0320 15:28:32.035383 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" 
event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerStarted","Data":"d836ee04c95bc14b5ae516b3365043620064a6e7e35a91ac0f12be1384e1a95c"} Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.041690 3552 generic.go:334] "Generic (PLEG): container finished" podID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerID="9acb7495105f93f492f6322f6afff3ccc17fbcb145e5aa1d828542efe7aea273" exitCode=0 Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.041780 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerDied","Data":"9acb7495105f93f492f6322f6afff3ccc17fbcb145e5aa1d828542efe7aea273"} Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.061806 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m447z" podStartSLOduration=7.45540934 podStartE2EDuration="50.061752718s" podCreationTimestamp="2026-03-20 15:27:43 +0000 UTC" firstStartedPulling="2026-03-20 15:27:47.754636474 +0000 UTC m=+167.448333304" lastFinishedPulling="2026-03-20 15:28:30.360979842 +0000 UTC m=+210.054676682" observedRunningTime="2026-03-20 15:28:33.056418563 +0000 UTC m=+212.750115403" watchObservedRunningTime="2026-03-20 15:28:33.061752718 +0000 UTC m=+212.755449558" Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.523566 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.523628 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.622070 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.956325 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7287f"] Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.968784 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpfdl"] Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.969144 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cpfdl" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerName="extract-content" containerID="cri-o://f052b221e6b942a4ce3ca804e694168debcc4ebdf3fe8e6ca06a1be1d29c6068" gracePeriod=30 Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.978865 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8jhz6"] Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.979252 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8jhz6" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="extract-content" containerID="cri-o://e77eafab78f1a15d5605fa3bd75a5093d1f6c9b8001b4168ef7d3752b52ad43c" gracePeriod=30 Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.998577 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wdqxz"] Mar 20 15:28:33 crc kubenswrapper[3552]: I0320 15:28:33.999216 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wdqxz" 
podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerName="extract-content" containerID="cri-o://c09d9535642f668280cf8be637e16c3f1f9f19f0f257bd3562b922fdfd4b36da" gracePeriod=30 Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.011344 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-8b455464d-f9xdt"] Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.011625 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" containerID="cri-o://e35516117959c0657c61f75c60ac454960dcc82cd799256400999ce94780240a" gracePeriod=30 Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.025686 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8s8pc"] Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.025954 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" containerID="cri-o://8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e" gracePeriod=30 Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.036154 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m447z"] Mar 20 15:28:34 crc kubenswrapper[3552]: E0320 15:28:34.036759 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e" cmd=["grpc_health_probe","-addr=:50051"] Mar 20 15:28:34 crc kubenswrapper[3552]: E0320 15:28:34.038185 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e" cmd=["grpc_health_probe","-addr=:50051"] Mar 20 15:28:34 crc kubenswrapper[3552]: E0320 15:28:34.040365 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e" cmd=["grpc_health_probe","-addr=:50051"] Mar 20 15:28:34 crc kubenswrapper[3552]: E0320 15:28:34.040428 3552 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-8s8pc" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.042242 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-8b455464d-tb8k4"] Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.042336 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0836a98f-28f3-4a11-9409-4cb07a04c016" podNamespace="openshift-marketplace" podName="marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.043183 3552 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.047361 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-b4zbk" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.048831 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f4jkp"] Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.048980 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-f4jkp" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="extract-content" containerID="cri-o://c38b422941aa22592610a0f47c436846eb1fda56485cb0fb70554db44e2a1903" gracePeriod=30 Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.058137 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gwfln"] Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.058483 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gwfln" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerName="extract-content" containerID="cri-o://6a5625ab56f0de99ec377d453a41b736b8ed5b7e565ae0cb2bc440171981cce1" gracePeriod=30 Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.065996 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-8b455464d-tb8k4"] Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.205268 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0836a98f-28f3-4a11-9409-4cb07a04c016-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.205491 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0836a98f-28f3-4a11-9409-4cb07a04c016-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.205664 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltgtj\" (UniqueName: \"kubernetes.io/projected/0836a98f-28f3-4a11-9409-4cb07a04c016-kube-api-access-ltgtj\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.306704 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ltgtj\" (UniqueName: \"kubernetes.io/projected/0836a98f-28f3-4a11-9409-4cb07a04c016-kube-api-access-ltgtj\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.307466 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/0836a98f-28f3-4a11-9409-4cb07a04c016-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.307737 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0836a98f-28f3-4a11-9409-4cb07a04c016-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.309775 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0836a98f-28f3-4a11-9409-4cb07a04c016-marketplace-trusted-ca\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.314432 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0836a98f-28f3-4a11-9409-4cb07a04c016-marketplace-operator-metrics\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.328177 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltgtj\" (UniqueName: \"kubernetes.io/projected/0836a98f-28f3-4a11-9409-4cb07a04c016-kube-api-access-ltgtj\") pod \"marketplace-operator-8b455464d-tb8k4\" (UID: \"0836a98f-28f3-4a11-9409-4cb07a04c016\") " pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.415842 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:34 crc kubenswrapper[3552]: I0320 15:28:34.927248 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-8b455464d-tb8k4"] Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.054919 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" event={"ID":"0836a98f-28f3-4a11-9409-4cb07a04c016","Type":"ContainerStarted","Data":"4ac6755a5db9079bf11242d59075742270baa69cd54f04f43d0a52479a6a8ba5"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.056846 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wdqxz_00c35ca6-08f9-4b39-8c2d-8aa044fd5935/extract-content/0.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.057419 3552 generic.go:334] "Generic (PLEG): container finished" podID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerID="c09d9535642f668280cf8be637e16c3f1f9f19f0f257bd3562b922fdfd4b36da" exitCode=2 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.057475 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wdqxz" event={"ID":"00c35ca6-08f9-4b39-8c2d-8aa044fd5935","Type":"ContainerDied","Data":"c09d9535642f668280cf8be637e16c3f1f9f19f0f257bd3562b922fdfd4b36da"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.059293 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8jhz6_3f4dca86-e6ee-4ec9-8324-86aff960225e/extract-content/1.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.059701 3552 generic.go:334] "Generic (PLEG): container finished" podID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerID="e77eafab78f1a15d5605fa3bd75a5093d1f6c9b8001b4168ef7d3752b52ad43c" exitCode=2 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.059752 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerDied","Data":"e77eafab78f1a15d5605fa3bd75a5093d1f6c9b8001b4168ef7d3752b52ad43c"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.061252 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-f4jkp_4092a9f8-5acc-4932-9e90-ef962eeb301a/extract-content/1.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.061813 3552 generic.go:334] "Generic (PLEG): container finished" podID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerID="c38b422941aa22592610a0f47c436846eb1fda56485cb0fb70554db44e2a1903" exitCode=2 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.061864 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerDied","Data":"c38b422941aa22592610a0f47c436846eb1fda56485cb0fb70554db44e2a1903"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.063299 3552 generic.go:334] "Generic (PLEG): container finished" podID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerID="e35516117959c0657c61f75c60ac454960dcc82cd799256400999ce94780240a" exitCode=0 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.063348 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" 
event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerDied","Data":"e35516117959c0657c61f75c60ac454960dcc82cd799256400999ce94780240a"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.066091 3552 generic.go:334] "Generic (PLEG): container finished" podID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerID="8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e" exitCode=0 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.066140 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerDied","Data":"8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.068160 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwfln_2be257e1-1750-4219-a32a-ad9c63b419d5/extract-content/0.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.068577 3552 generic.go:334] "Generic (PLEG): container finished" podID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerID="6a5625ab56f0de99ec377d453a41b736b8ed5b7e565ae0cb2bc440171981cce1" exitCode=2 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.068622 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwfln" event={"ID":"2be257e1-1750-4219-a32a-ad9c63b419d5","Type":"ContainerDied","Data":"6a5625ab56f0de99ec377d453a41b736b8ed5b7e565ae0cb2bc440171981cce1"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.071338 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerStarted","Data":"d5f606ccd861f92887a73ef2bfcf22b93b0b6c6c8e287aa4c1dbd65cf5853925"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.071546 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7287f" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="registry-server" containerID="cri-o://d5f606ccd861f92887a73ef2bfcf22b93b0b6c6c8e287aa4c1dbd65cf5853925" gracePeriod=30 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.074051 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cpfdl_b76f3002-ca0c-473a-a5ec-e66774cab758/extract-content/0.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.074499 3552 generic.go:334] "Generic (PLEG): container finished" podID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerID="f052b221e6b942a4ce3ca804e694168debcc4ebdf3fe8e6ca06a1be1d29c6068" exitCode=2 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.074670 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-m447z" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="registry-server" containerID="cri-o://d836ee04c95bc14b5ae516b3365043620064a6e7e35a91ac0f12be1384e1a95c" gracePeriod=30 Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.074876 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpfdl" event={"ID":"b76f3002-ca0c-473a-a5ec-e66774cab758","Type":"ContainerDied","Data":"f052b221e6b942a4ce3ca804e694168debcc4ebdf3fe8e6ca06a1be1d29c6068"} Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.418853 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-gwfln_2be257e1-1750-4219-a32a-ad9c63b419d5/extract-content/0.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.419566 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.479986 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cpfdl_b76f3002-ca0c-473a-a5ec-e66774cab758/extract-content/0.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.480327 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.526060 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-catalog-content\") pod \"2be257e1-1750-4219-a32a-ad9c63b419d5\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.526158 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlg5m\" (UniqueName: \"kubernetes.io/projected/2be257e1-1750-4219-a32a-ad9c63b419d5-kube-api-access-mlg5m\") pod \"2be257e1-1750-4219-a32a-ad9c63b419d5\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.526255 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-utilities\") pod \"2be257e1-1750-4219-a32a-ad9c63b419d5\" (UID: \"2be257e1-1750-4219-a32a-ad9c63b419d5\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.527144 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-utilities" (OuterVolumeSpecName: "utilities") pod "2be257e1-1750-4219-a32a-ad9c63b419d5" (UID: "2be257e1-1750-4219-a32a-ad9c63b419d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.530789 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2be257e1-1750-4219-a32a-ad9c63b419d5-kube-api-access-mlg5m" (OuterVolumeSpecName: "kube-api-access-mlg5m") pod "2be257e1-1750-4219-a32a-ad9c63b419d5" (UID: "2be257e1-1750-4219-a32a-ad9c63b419d5"). InnerVolumeSpecName "kube-api-access-mlg5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.561247 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.566926 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.571142 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8jhz6_3f4dca86-e6ee-4ec9-8324-86aff960225e/extract-content/1.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.571472 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.574672 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-f4jkp_4092a9f8-5acc-4932-9e90-ef962eeb301a/extract-content/1.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.575013 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.581201 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wdqxz_00c35ca6-08f9-4b39-8c2d-8aa044fd5935/extract-content/0.log" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.585018 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.627179 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-utilities\") pod \"b76f3002-ca0c-473a-a5ec-e66774cab758\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.627245 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdtnw\" (UniqueName: \"kubernetes.io/projected/b76f3002-ca0c-473a-a5ec-e66774cab758-kube-api-access-kdtnw\") pod \"b76f3002-ca0c-473a-a5ec-e66774cab758\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.627358 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-catalog-content\") pod \"b76f3002-ca0c-473a-a5ec-e66774cab758\" (UID: \"b76f3002-ca0c-473a-a5ec-e66774cab758\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.627618 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mlg5m\" (UniqueName: \"kubernetes.io/projected/2be257e1-1750-4219-a32a-ad9c63b419d5-kube-api-access-mlg5m\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.627644 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.627928 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-utilities" (OuterVolumeSpecName: "utilities") pod "b76f3002-ca0c-473a-a5ec-e66774cab758" (UID: "b76f3002-ca0c-473a-a5ec-e66774cab758"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.630322 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b76f3002-ca0c-473a-a5ec-e66774cab758-kube-api-access-kdtnw" (OuterVolumeSpecName: "kube-api-access-kdtnw") pod "b76f3002-ca0c-473a-a5ec-e66774cab758" (UID: "b76f3002-ca0c-473a-a5ec-e66774cab758"). InnerVolumeSpecName "kube-api-access-kdtnw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728667 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-catalog-content\") pod \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728738 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") pod \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728786 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities\") pod \"4092a9f8-5acc-4932-9e90-ef962eeb301a\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728820 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content\") pod \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728851 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45jwc\" (UniqueName: \"kubernetes.io/projected/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-kube-api-access-45jwc\") pod \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728878 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content\") pod \"3f4dca86-e6ee-4ec9-8324-86aff960225e\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728910 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities\") pod \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.728982 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") pod \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729005 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") pod \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\" (UID: \"3482be94-0cdb-4e2a-889b-e5fac59fdbf5\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729045 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-utilities\") pod 
\"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\" (UID: \"00c35ca6-08f9-4b39-8c2d-8aa044fd5935\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729064 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities\") pod \"3f4dca86-e6ee-4ec9-8324-86aff960225e\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729088 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content\") pod \"4092a9f8-5acc-4932-9e90-ef962eeb301a\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729123 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") pod \"3f4dca86-e6ee-4ec9-8324-86aff960225e\" (UID: \"3f4dca86-e6ee-4ec9-8324-86aff960225e\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729153 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") pod \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\" (UID: \"c782cf62-a827-4677-b3c2-6f82c5f09cbb\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729204 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") pod \"4092a9f8-5acc-4932-9e90-ef962eeb301a\" (UID: \"4092a9f8-5acc-4932-9e90-ef962eeb301a\") " Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729395 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.729444 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kdtnw\" (UniqueName: \"kubernetes.io/projected/b76f3002-ca0c-473a-a5ec-e66774cab758-kube-api-access-kdtnw\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.730116 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities" (OuterVolumeSpecName: "utilities") pod "3f4dca86-e6ee-4ec9-8324-86aff960225e" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.730872 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities" (OuterVolumeSpecName: "utilities") pod "4092a9f8-5acc-4932-9e90-ef962eeb301a" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.731207 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities" (OuterVolumeSpecName: "utilities") pod "c782cf62-a827-4677-b3c2-6f82c5f09cbb" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.731918 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "3482be94-0cdb-4e2a-889b-e5fac59fdbf5" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.732328 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-kube-api-access-45jwc" (OuterVolumeSpecName: "kube-api-access-45jwc") pod "00c35ca6-08f9-4b39-8c2d-8aa044fd5935" (UID: "00c35ca6-08f9-4b39-8c2d-8aa044fd5935"). InnerVolumeSpecName "kube-api-access-45jwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.734738 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-utilities" (OuterVolumeSpecName: "utilities") pod "00c35ca6-08f9-4b39-8c2d-8aa044fd5935" (UID: "00c35ca6-08f9-4b39-8c2d-8aa044fd5935"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.736987 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r" (OuterVolumeSpecName: "kube-api-access-tf29r") pod "c782cf62-a827-4677-b3c2-6f82c5f09cbb" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb"). InnerVolumeSpecName "kube-api-access-tf29r". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.737078 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg" (OuterVolumeSpecName: "kube-api-access-rg2zg") pod "3482be94-0cdb-4e2a-889b-e5fac59fdbf5" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5"). InnerVolumeSpecName "kube-api-access-rg2zg". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.737128 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt" (OuterVolumeSpecName: "kube-api-access-n6sqt") pod "3f4dca86-e6ee-4ec9-8324-86aff960225e" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e"). InnerVolumeSpecName "kube-api-access-n6sqt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.738683 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb" (OuterVolumeSpecName: "kube-api-access-ptdrb") pod "4092a9f8-5acc-4932-9e90-ef962eeb301a" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a"). InnerVolumeSpecName "kube-api-access-ptdrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.745307 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "3482be94-0cdb-4e2a-889b-e5fac59fdbf5" (UID: "3482be94-0cdb-4e2a-889b-e5fac59fdbf5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830592 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830639 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830655 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-n6sqt\" (UniqueName: \"kubernetes.io/projected/3f4dca86-e6ee-4ec9-8324-86aff960225e-kube-api-access-n6sqt\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830669 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-tf29r\" (UniqueName: \"kubernetes.io/projected/c782cf62-a827-4677-b3c2-6f82c5f09cbb-kube-api-access-tf29r\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830683 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-ptdrb\" (UniqueName: \"kubernetes.io/projected/4092a9f8-5acc-4932-9e90-ef962eeb301a-kube-api-access-ptdrb\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830699 3552 reconciler_common.go:300] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830712 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830726 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-45jwc\" (UniqueName: \"kubernetes.io/projected/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-kube-api-access-45jwc\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830739 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830754 3552 reconciler_common.go:300] "Volume 
detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.830766 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rg2zg\" (UniqueName: \"kubernetes.io/projected/3482be94-0cdb-4e2a-889b-e5fac59fdbf5-kube-api-access-rg2zg\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.875180 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c782cf62-a827-4677-b3c2-6f82c5f09cbb" (UID: "c782cf62-a827-4677-b3c2-6f82c5f09cbb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:35 crc kubenswrapper[3552]: I0320 15:28:35.932012 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c782cf62-a827-4677-b3c2-6f82c5f09cbb-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.090971 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wdqxz_00c35ca6-08f9-4b39-8c2d-8aa044fd5935/extract-content/0.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.092329 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wdqxz" event={"ID":"00c35ca6-08f9-4b39-8c2d-8aa044fd5935","Type":"ContainerDied","Data":"a29d0045b708366a9a0df99faeade8804461a2cb2f5e60f6b999fbc27bc91e27"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.092378 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wdqxz" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.092389 3552 scope.go:117] "RemoveContainer" containerID="c09d9535642f668280cf8be637e16c3f1f9f19f0f257bd3562b922fdfd4b36da" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.097670 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8jhz6_3f4dca86-e6ee-4ec9-8324-86aff960225e/extract-content/1.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.097944 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jhz6" event={"ID":"3f4dca86-e6ee-4ec9-8324-86aff960225e","Type":"ContainerDied","Data":"c03c1dff4b5b81dd53f5fb641d4143b6c8e4e926af644360cde46e77ef64c4e7"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.097997 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jhz6" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.101943 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-f4jkp_4092a9f8-5acc-4932-9e90-ef962eeb301a/extract-content/1.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.102431 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f4jkp" event={"ID":"4092a9f8-5acc-4932-9e90-ef962eeb301a","Type":"ContainerDied","Data":"8798f097f6508f33d2702cf0e2add82abdee68b2584853e0cabc76b4f67b7bfa"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.102495 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f4jkp" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.120210 3552 generic.go:334] "Generic (PLEG): container finished" podID="df2e1272-092c-448f-80f0-bcdf036b7090" containerID="d836ee04c95bc14b5ae516b3365043620064a6e7e35a91ac0f12be1384e1a95c" exitCode=0 Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.120390 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerDied","Data":"d836ee04c95bc14b5ae516b3365043620064a6e7e35a91ac0f12be1384e1a95c"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.125249 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8s8pc" event={"ID":"c782cf62-a827-4677-b3c2-6f82c5f09cbb","Type":"ContainerDied","Data":"749c9bc0265111680d24335b66dc358ca6f7086afc779fe4992f8a1eb9ac2fbd"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.125301 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8s8pc" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.126608 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwfln_2be257e1-1750-4219-a32a-ad9c63b419d5/extract-content/0.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.127314 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwfln" event={"ID":"2be257e1-1750-4219-a32a-ad9c63b419d5","Type":"ContainerDied","Data":"762cfb48f5bf278d82a396c427671c952f5895a7241d3dd4763607d649166fb9"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.127376 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwfln" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.128732 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7287f_887d596e-c519-4bfa-af90-3edd9e1b2f0f/registry-server/1.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.129159 3552 generic.go:334] "Generic (PLEG): container finished" podID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerID="d5f606ccd861f92887a73ef2bfcf22b93b0b6c6c8e287aa4c1dbd65cf5853925" exitCode=2 Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.129194 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerDied","Data":"d5f606ccd861f92887a73ef2bfcf22b93b0b6c6c8e287aa4c1dbd65cf5853925"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.130477 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" event={"ID":"3482be94-0cdb-4e2a-889b-e5fac59fdbf5","Type":"ContainerDied","Data":"63c02329832da52f3fbb639aab68c7b7d4d969d6ee0b754d524bc582679a11da"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.130519 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-8b455464d-f9xdt" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.141686 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" event={"ID":"0836a98f-28f3-4a11-9409-4cb07a04c016","Type":"ContainerStarted","Data":"9ef501c5ce169a8fd9f4d239cb3758712f38670fca1d2a13bac034ab519822b2"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.142519 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.144174 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cpfdl_b76f3002-ca0c-473a-a5ec-e66774cab758/extract-content/0.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.144468 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpfdl" event={"ID":"b76f3002-ca0c-473a-a5ec-e66774cab758","Type":"ContainerDied","Data":"9306ad93b404df63d909985f44b00dd0de0283e49eda0bd793536cf44bafb9d1"} Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.144520 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpfdl" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.146617 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.164619 3552 scope.go:117] "RemoveContainer" containerID="02908bcde9587d5ab036e00ce05e06862119a97bfd45aa1db1ab1f36c52d7578" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.192665 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-8b455464d-f9xdt"] Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.193917 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-8b455464d-f9xdt"] Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.202822 3552 scope.go:117] "RemoveContainer" containerID="e77eafab78f1a15d5605fa3bd75a5093d1f6c9b8001b4168ef7d3752b52ad43c" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.243228 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8s8pc"] Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.254850 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8s8pc"] Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.260596 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7287f_887d596e-c519-4bfa-af90-3edd9e1b2f0f/registry-server/1.log" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.261983 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.271754 3552 scope.go:117] "RemoveContainer" containerID="757f8272c42551695b53a563f965627a08838529003952870cd8e43497fd8ea0" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.274301 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-8b455464d-tb8k4" podStartSLOduration=2.274257317 podStartE2EDuration="2.274257317s" podCreationTimestamp="2026-03-20 15:28:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:28:36.266535656 +0000 UTC m=+215.960232486" watchObservedRunningTime="2026-03-20 15:28:36.274257317 +0000 UTC m=+215.967954147" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.313749 3552 scope.go:117] "RemoveContainer" containerID="c38b422941aa22592610a0f47c436846eb1fda56485cb0fb70554db44e2a1903" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.336759 3552 scope.go:117] "RemoveContainer" containerID="1f6624c4aa1b883723037d797a94571511aa13ccc973dae56696329e0a7b69d2" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.356575 3552 scope.go:117] "RemoveContainer" containerID="8af159d6374dd7f35b9cafa19ea71fcdd25e60ad24117211f22e51ff8cc0771e" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.377070 3552 scope.go:117] "RemoveContainer" containerID="d617c04c5f4cfe84b884613fe829578314ef242b5b0cb8d7ff3f546aa607cf3f" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.400877 3552 scope.go:117] "RemoveContainer" containerID="c254c8b2c434ccd5fc3ef2ac73790220547d3b1e71055271f5f863bcf6a77e81" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.429246 3552 scope.go:117] "RemoveContainer" containerID="6a5625ab56f0de99ec377d453a41b736b8ed5b7e565ae0cb2bc440171981cce1" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.438568 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities\") pod \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.438624 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") pod \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.438705 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content\") pod \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\" (UID: \"887d596e-c519-4bfa-af90-3edd9e1b2f0f\") " Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.446584 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities" (OuterVolumeSpecName: "utilities") pod "887d596e-c519-4bfa-af90-3edd9e1b2f0f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.454617 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5" (OuterVolumeSpecName: "kube-api-access-ncrf5") pod "887d596e-c519-4bfa-af90-3edd9e1b2f0f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f"). InnerVolumeSpecName "kube-api-access-ncrf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.473269 3552 scope.go:117] "RemoveContainer" containerID="5be9bd9063932ea78f92060d56c558a67d02ed36e501bd4c316f6a1fff6fb87a" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.504849 3552 scope.go:117] "RemoveContainer" containerID="e35516117959c0657c61f75c60ac454960dcc82cd799256400999ce94780240a" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.534646 3552 scope.go:117] "RemoveContainer" containerID="f052b221e6b942a4ce3ca804e694168debcc4ebdf3fe8e6ca06a1be1d29c6068" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.540046 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.540097 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-ncrf5\" (UniqueName: \"kubernetes.io/projected/887d596e-c519-4bfa-af90-3edd9e1b2f0f-kube-api-access-ncrf5\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:36 crc kubenswrapper[3552]: I0320 15:28:36.558575 3552 scope.go:117] "RemoveContainer" containerID="388a47a9f20c6834736217548c0c10f2b01c1cd1df6bb793860a3f6d859bc257" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.152025 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7287f_887d596e-c519-4bfa-af90-3edd9e1b2f0f/registry-server/1.log" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.153209 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7287f" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.153243 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7287f" event={"ID":"887d596e-c519-4bfa-af90-3edd9e1b2f0f","Type":"ContainerDied","Data":"39bfbef6cd1e072d5e2d4fac60cd7568a2a645cc2d76bcee06770b06158b36d3"} Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.153303 3552 scope.go:117] "RemoveContainer" containerID="d5f606ccd861f92887a73ef2bfcf22b93b0b6c6c8e287aa4c1dbd65cf5853925" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.198222 3552 scope.go:117] "RemoveContainer" containerID="9acb7495105f93f492f6322f6afff3ccc17fbcb145e5aa1d828542efe7aea273" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.238173 3552 scope.go:117] "RemoveContainer" containerID="2be3b68f1cfd5334102db663408c9e56a62f2cbc20ee8c4eefcd01d7a9f0d52c" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.441568 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" path="/var/lib/kubelet/pods/3482be94-0cdb-4e2a-889b-e5fac59fdbf5/volumes" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.442471 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" path="/var/lib/kubelet/pods/c782cf62-a827-4677-b3c2-6f82c5f09cbb/volumes" Mar 20 15:28:37 crc kubenswrapper[3552]: I0320 15:28:37.993083 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.160444 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ng9bg\" (UniqueName: \"kubernetes.io/projected/df2e1272-092c-448f-80f0-bcdf036b7090-kube-api-access-ng9bg\") pod \"df2e1272-092c-448f-80f0-bcdf036b7090\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.160559 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-catalog-content\") pod \"df2e1272-092c-448f-80f0-bcdf036b7090\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.160794 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-utilities\") pod \"df2e1272-092c-448f-80f0-bcdf036b7090\" (UID: \"df2e1272-092c-448f-80f0-bcdf036b7090\") " Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.161765 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-utilities" (OuterVolumeSpecName: "utilities") pod "df2e1272-092c-448f-80f0-bcdf036b7090" (UID: "df2e1272-092c-448f-80f0-bcdf036b7090"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.162144 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.165364 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df2e1272-092c-448f-80f0-bcdf036b7090-kube-api-access-ng9bg" (OuterVolumeSpecName: "kube-api-access-ng9bg") pod "df2e1272-092c-448f-80f0-bcdf036b7090" (UID: "df2e1272-092c-448f-80f0-bcdf036b7090"). InnerVolumeSpecName "kube-api-access-ng9bg". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.177508 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m447z" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.177592 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m447z" event={"ID":"df2e1272-092c-448f-80f0-bcdf036b7090","Type":"ContainerDied","Data":"8e52d85efd282a1f01ac46f2ce97f045143292b57b4b2693c5899755c8a477ec"} Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.177681 3552 scope.go:117] "RemoveContainer" containerID="d836ee04c95bc14b5ae516b3365043620064a6e7e35a91ac0f12be1384e1a95c" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.206883 3552 scope.go:117] "RemoveContainer" containerID="71a64d97722562a28eb102fd15aa034e65b73d80baa70b5c41248bfde3f71a4b" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.236979 3552 scope.go:117] "RemoveContainer" containerID="f96cdaa1843c7a2ba988d93c1a00c0c601b7609c00baae0a99fa5a16fe3e02a5" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.263246 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-ng9bg\" (UniqueName: \"kubernetes.io/projected/df2e1272-092c-448f-80f0-bcdf036b7090-kube-api-access-ng9bg\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.833771 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bdhkj"] Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.833889 3552 topology_manager.go:215] "Topology Admit Handler" podUID="70ff60e6-4747-4506-9cf4-913661b8e689" podNamespace="openshift-marketplace" podName="redhat-marketplace-bdhkj" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834026 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834040 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834053 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834063 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834077 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerName="extract-utilities" Mar 20 15:28:38 crc 
kubenswrapper[3552]: I0320 15:28:38.834085 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834100 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834111 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834126 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834136 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834149 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834158 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834169 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834177 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834209 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834222 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834235 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834243 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834256 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834265 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834277 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834285 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834297 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" 
containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834305 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834316 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834324 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834336 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834345 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834357 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834367 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834378 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834386 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834430 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834438 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834449 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834458 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="extract-utilities" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834470 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834479 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: E0320 15:28:38.834489 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834497 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834603 3552 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834614 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834625 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3482be94-0cdb-4e2a-889b-e5fac59fdbf5" containerName="marketplace-operator" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834636 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c782cf62-a827-4677-b3c2-6f82c5f09cbb" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834647 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" containerName="registry-server" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834657 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834676 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834688 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.834699 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" containerName="extract-content" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.835796 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.848348 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdhkj"] Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.980063 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ff60e6-4747-4506-9cf4-913661b8e689-utilities\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.980242 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frn66\" (UniqueName: \"kubernetes.io/projected/70ff60e6-4747-4506-9cf4-913661b8e689-kube-api-access-frn66\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:38 crc kubenswrapper[3552]: I0320 15:28:38.980380 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ff60e6-4747-4506-9cf4-913661b8e689-catalog-content\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.081601 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ff60e6-4747-4506-9cf4-913661b8e689-utilities\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.081672 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-frn66\" (UniqueName: \"kubernetes.io/projected/70ff60e6-4747-4506-9cf4-913661b8e689-kube-api-access-frn66\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.081706 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ff60e6-4747-4506-9cf4-913661b8e689-catalog-content\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.082254 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70ff60e6-4747-4506-9cf4-913661b8e689-catalog-content\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.082327 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70ff60e6-4747-4506-9cf4-913661b8e689-utilities\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.099741 3552 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-frn66\" (UniqueName: \"kubernetes.io/projected/70ff60e6-4747-4506-9cf4-913661b8e689-kube-api-access-frn66\") pod \"redhat-marketplace-bdhkj\" (UID: \"70ff60e6-4747-4506-9cf4-913661b8e689\") " pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.157687 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:39 crc kubenswrapper[3552]: I0320 15:28:39.404950 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdhkj"] Mar 20 15:28:39 crc kubenswrapper[3552]: W0320 15:28:39.418448 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70ff60e6_4747_4506_9cf4_913661b8e689.slice/crio-4c74fcfd2ca39f1ca7488fddedb1b01ed43207d2b33a2d48b8df193df99c348e WatchSource:0}: Error finding container 4c74fcfd2ca39f1ca7488fddedb1b01ed43207d2b33a2d48b8df193df99c348e: Status 404 returned error can't find the container with id 4c74fcfd2ca39f1ca7488fddedb1b01ed43207d2b33a2d48b8df193df99c348e Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.199442 3552 generic.go:334] "Generic (PLEG): container finished" podID="70ff60e6-4747-4506-9cf4-913661b8e689" containerID="356e7e7336fc67223d328484fd1190425f84f778c1b27093e95970f570984fdf" exitCode=0 Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.199503 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdhkj" event={"ID":"70ff60e6-4747-4506-9cf4-913661b8e689","Type":"ContainerDied","Data":"356e7e7336fc67223d328484fd1190425f84f778c1b27093e95970f570984fdf"} Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.199530 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdhkj" event={"ID":"70ff60e6-4747-4506-9cf4-913661b8e689","Type":"ContainerStarted","Data":"4c74fcfd2ca39f1ca7488fddedb1b01ed43207d2b33a2d48b8df193df99c348e"} Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.715020 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.715630 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.715022 3552 patch_prober.go:28] interesting pod/downloads-65476884b9-9wcvx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" start-of-body= Mar 20 15:28:40 crc kubenswrapper[3552]: I0320 15:28:40.715797 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-65476884b9-9wcvx" podUID="6268b7fe-8910-4505-b404-6f1df638105c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.66:8080/\": dial tcp 10.217.0.66:8080: connect: connection refused" Mar 20 15:28:42 crc kubenswrapper[3552]: I0320 15:28:42.213585 3552 
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdhkj" event={"ID":"70ff60e6-4747-4506-9cf4-913661b8e689","Type":"ContainerStarted","Data":"516a8d55f2d53c0a79ed939409e4edc86809533c0bc34af0d2534bf3ac798ea2"} Mar 20 15:28:42 crc kubenswrapper[3552]: I0320 15:28:42.778687 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:28:42 crc kubenswrapper[3552]: I0320 15:28:42.778792 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:28:42 crc kubenswrapper[3552]: I0320 15:28:42.778846 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:28:42 crc kubenswrapper[3552]: I0320 15:28:42.779878 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6ca39e978d01eda74942a426f0604e4d4f5e9ca91b0d2821a78787798a318e64"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:28:42 crc kubenswrapper[3552]: I0320 15:28:42.780271 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://6ca39e978d01eda74942a426f0604e4d4f5e9ca91b0d2821a78787798a318e64" gracePeriod=600 Mar 20 15:28:44 crc kubenswrapper[3552]: I0320 15:28:44.232431 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="6ca39e978d01eda74942a426f0604e4d4f5e9ca91b0d2821a78787798a318e64" exitCode=0 Mar 20 15:28:44 crc kubenswrapper[3552]: I0320 15:28:44.232531 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"6ca39e978d01eda74942a426f0604e4d4f5e9ca91b0d2821a78787798a318e64"} Mar 20 15:28:44 crc kubenswrapper[3552]: I0320 15:28:44.325802 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3f4dca86-e6ee-4ec9-8324-86aff960225e" (UID: "3f4dca86-e6ee-4ec9-8324-86aff960225e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:44 crc kubenswrapper[3552]: I0320 15:28:44.357177 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f4dca86-e6ee-4ec9-8324-86aff960225e-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:44 crc kubenswrapper[3552]: I0320 15:28:44.569637 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8jhz6"] Mar 20 15:28:44 crc kubenswrapper[3552]: I0320 15:28:44.580203 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8jhz6"] Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.051644 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dm8n5"] Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.052799 3552 topology_manager.go:215] "Topology Admit Handler" podUID="1d0b6cae-c446-4d8c-a8ae-41523c726770" podNamespace="openshift-marketplace" podName="community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.053930 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.083002 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dm8n5"] Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.170002 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d0b6cae-c446-4d8c-a8ae-41523c726770-utilities\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.170061 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzkx6\" (UniqueName: \"kubernetes.io/projected/1d0b6cae-c446-4d8c-a8ae-41523c726770-kube-api-access-nzkx6\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.170163 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d0b6cae-c446-4d8c-a8ae-41523c726770-catalog-content\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.239512 3552 generic.go:334] "Generic (PLEG): container finished" podID="70ff60e6-4747-4506-9cf4-913661b8e689" containerID="516a8d55f2d53c0a79ed939409e4edc86809533c0bc34af0d2534bf3ac798ea2" exitCode=0 Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.239567 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdhkj" event={"ID":"70ff60e6-4747-4506-9cf4-913661b8e689","Type":"ContainerDied","Data":"516a8d55f2d53c0a79ed939409e4edc86809533c0bc34af0d2534bf3ac798ea2"} Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.241694 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" 
event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"b0e20f7a5795a7032ac82d7a882b848725b321b9b7d2af58b2d9df46f95bd747"} Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.271159 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d0b6cae-c446-4d8c-a8ae-41523c726770-utilities\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.271231 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nzkx6\" (UniqueName: \"kubernetes.io/projected/1d0b6cae-c446-4d8c-a8ae-41523c726770-kube-api-access-nzkx6\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.271330 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d0b6cae-c446-4d8c-a8ae-41523c726770-catalog-content\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.272073 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d0b6cae-c446-4d8c-a8ae-41523c726770-utilities\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.272085 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d0b6cae-c446-4d8c-a8ae-41523c726770-catalog-content\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.299513 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzkx6\" (UniqueName: \"kubernetes.io/projected/1d0b6cae-c446-4d8c-a8ae-41523c726770-kube-api-access-nzkx6\") pod \"community-operators-dm8n5\" (UID: \"1d0b6cae-c446-4d8c-a8ae-41523c726770\") " pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.367901 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.441506 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f4dca86-e6ee-4ec9-8324-86aff960225e" path="/var/lib/kubelet/pods/3f4dca86-e6ee-4ec9-8324-86aff960225e/volumes" Mar 20 15:28:45 crc kubenswrapper[3552]: I0320 15:28:45.838940 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dm8n5"] Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.251175 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dm8n5" event={"ID":"1d0b6cae-c446-4d8c-a8ae-41523c726770","Type":"ContainerStarted","Data":"ea7123d5600df8438de65dbe3b0e9f6b30fa278b8b5c63b07f0c618e283a4003"} Mar 20 15:28:46 crc kubenswrapper[3552]: E0320 15:28:46.528046 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="unmounted volumes=[registry-storage], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.647542 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4092a9f8-5acc-4932-9e90-ef962eeb301a" (UID: "4092a9f8-5acc-4932-9e90-ef962eeb301a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.662942 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4092a9f8-5acc-4932-9e90-ef962eeb301a-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.710724 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df2e1272-092c-448f-80f0-bcdf036b7090" (UID: "df2e1272-092c-448f-80f0-bcdf036b7090"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.749121 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2be257e1-1750-4219-a32a-ad9c63b419d5" (UID: "2be257e1-1750-4219-a32a-ad9c63b419d5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.764824 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df2e1272-092c-448f-80f0-bcdf036b7090-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.764885 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2be257e1-1750-4219-a32a-ad9c63b419d5-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.885882 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b76f3002-ca0c-473a-a5ec-e66774cab758" (UID: "b76f3002-ca0c-473a-a5ec-e66774cab758"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.887959 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00c35ca6-08f9-4b39-8c2d-8aa044fd5935" (UID: "00c35ca6-08f9-4b39-8c2d-8aa044fd5935"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.917063 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m447z"] Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.917880 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "887d596e-c519-4bfa-af90-3edd9e1b2f0f" (UID: "887d596e-c519-4bfa-af90-3edd9e1b2f0f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.920570 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-m447z"] Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.975058 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b76f3002-ca0c-473a-a5ec-e66774cab758-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.975117 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c35ca6-08f9-4b39-8c2d-8aa044fd5935-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.975136 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887d596e-c519-4bfa-af90-3edd9e1b2f0f-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.979326 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wdqxz"] Mar 20 15:28:46 crc kubenswrapper[3552]: I0320 15:28:46.992622 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wdqxz"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.013293 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f4jkp"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.027255 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-f4jkp"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.038962 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gwfln"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.045437 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gwfln"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.050103 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpfdl"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.053609 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cpfdl"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.087458 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7287f"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.095061 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7287f"] Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.258933 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdhkj" event={"ID":"70ff60e6-4747-4506-9cf4-913661b8e689","Type":"ContainerStarted","Data":"4cedfe32699b88904faa31b7b240948b37c32e394c95f720238fa7d6ecc7b54d"} Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.262039 3552 generic.go:334] "Generic (PLEG): container finished" podID="1d0b6cae-c446-4d8c-a8ae-41523c726770" containerID="4a43bacdd7355339680bf72785c18eb56ffdb8105e527eb9df3547479e347aef" exitCode=0 Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.262076 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dm8n5" 
event={"ID":"1d0b6cae-c446-4d8c-a8ae-41523c726770","Type":"ContainerDied","Data":"4a43bacdd7355339680bf72785c18eb56ffdb8105e527eb9df3547479e347aef"} Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.284738 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bdhkj" podStartSLOduration=4.946389974 podStartE2EDuration="9.284685979s" podCreationTimestamp="2026-03-20 15:28:38 +0000 UTC" firstStartedPulling="2026-03-20 15:28:41.205436107 +0000 UTC m=+220.899132947" lastFinishedPulling="2026-03-20 15:28:45.543732112 +0000 UTC m=+225.237428952" observedRunningTime="2026-03-20 15:28:47.278472666 +0000 UTC m=+226.972169536" watchObservedRunningTime="2026-03-20 15:28:47.284685979 +0000 UTC m=+226.978382819" Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.442242 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00c35ca6-08f9-4b39-8c2d-8aa044fd5935" path="/var/lib/kubelet/pods/00c35ca6-08f9-4b39-8c2d-8aa044fd5935/volumes" Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.443116 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2be257e1-1750-4219-a32a-ad9c63b419d5" path="/var/lib/kubelet/pods/2be257e1-1750-4219-a32a-ad9c63b419d5/volumes" Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.443885 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4092a9f8-5acc-4932-9e90-ef962eeb301a" path="/var/lib/kubelet/pods/4092a9f8-5acc-4932-9e90-ef962eeb301a/volumes" Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.445628 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="887d596e-c519-4bfa-af90-3edd9e1b2f0f" path="/var/lib/kubelet/pods/887d596e-c519-4bfa-af90-3edd9e1b2f0f/volumes" Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.446741 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b76f3002-ca0c-473a-a5ec-e66774cab758" path="/var/lib/kubelet/pods/b76f3002-ca0c-473a-a5ec-e66774cab758/volumes" Mar 20 15:28:47 crc kubenswrapper[3552]: I0320 15:28:47.448063 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df2e1272-092c-448f-80f0-bcdf036b7090" path="/var/lib/kubelet/pods/df2e1272-092c-448f-80f0-bcdf036b7090/volumes" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.158961 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.159339 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.273691 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dm8n5" event={"ID":"1d0b6cae-c446-4d8c-a8ae-41523c726770","Type":"ContainerStarted","Data":"3bc6af370a381837ec3c02dc34b0c325e13aba09c67e8c2f4e205a14e8c53772"} Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.301417 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.454100 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pwxgv"] Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.454171 3552 topology_manager.go:215] "Topology Admit Handler" podUID="222b106d-1c3f-47e0-bf17-27ff566f7b95" podNamespace="openshift-marketplace" 
podName="redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.455261 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pwxgv"] Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.455349 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.460074 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-dwn4s" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.521723 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/222b106d-1c3f-47e0-bf17-27ff566f7b95-catalog-content\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.521821 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9ktv\" (UniqueName: \"kubernetes.io/projected/222b106d-1c3f-47e0-bf17-27ff566f7b95-kube-api-access-z9ktv\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.522088 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/222b106d-1c3f-47e0-bf17-27ff566f7b95-utilities\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.623662 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/222b106d-1c3f-47e0-bf17-27ff566f7b95-utilities\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.623854 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/222b106d-1c3f-47e0-bf17-27ff566f7b95-catalog-content\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.623918 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-z9ktv\" (UniqueName: \"kubernetes.io/projected/222b106d-1c3f-47e0-bf17-27ff566f7b95-kube-api-access-z9ktv\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.625233 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/222b106d-1c3f-47e0-bf17-27ff566f7b95-utilities\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.625924 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/222b106d-1c3f-47e0-bf17-27ff566f7b95-catalog-content\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.645147 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dcwv8"] Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.645564 3552 topology_manager.go:215] "Topology Admit Handler" podUID="95f52fb1-4064-4f8b-8031-fe8351bebc06" podNamespace="openshift-marketplace" podName="certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.646343 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9ktv\" (UniqueName: \"kubernetes.io/projected/222b106d-1c3f-47e0-bf17-27ff566f7b95-kube-api-access-z9ktv\") pod \"redhat-operators-pwxgv\" (UID: \"222b106d-1c3f-47e0-bf17-27ff566f7b95\") " pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.646889 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.651865 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-twmwc" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.653542 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dcwv8"] Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.727140 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfzq6\" (UniqueName: \"kubernetes.io/projected/95f52fb1-4064-4f8b-8031-fe8351bebc06-kube-api-access-mfzq6\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.727229 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95f52fb1-4064-4f8b-8031-fe8351bebc06-utilities\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.727477 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95f52fb1-4064-4f8b-8031-fe8351bebc06-catalog-content\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.775374 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.830956 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mfzq6\" (UniqueName: \"kubernetes.io/projected/95f52fb1-4064-4f8b-8031-fe8351bebc06-kube-api-access-mfzq6\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.831062 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95f52fb1-4064-4f8b-8031-fe8351bebc06-utilities\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.831172 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95f52fb1-4064-4f8b-8031-fe8351bebc06-catalog-content\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.832447 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95f52fb1-4064-4f8b-8031-fe8351bebc06-utilities\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.832540 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95f52fb1-4064-4f8b-8031-fe8351bebc06-catalog-content\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.853612 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfzq6\" (UniqueName: \"kubernetes.io/projected/95f52fb1-4064-4f8b-8031-fe8351bebc06-kube-api-access-mfzq6\") pod \"certified-operators-dcwv8\" (UID: \"95f52fb1-4064-4f8b-8031-fe8351bebc06\") " pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:49 crc kubenswrapper[3552]: I0320 15:28:49.968005 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:50 crc kubenswrapper[3552]: I0320 15:28:50.205130 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pwxgv"] Mar 20 15:28:50 crc kubenswrapper[3552]: I0320 15:28:50.221112 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dcwv8"] Mar 20 15:28:50 crc kubenswrapper[3552]: W0320 15:28:50.230623 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95f52fb1_4064_4f8b_8031_fe8351bebc06.slice/crio-65ac90b89e64345bcd8a67621cb13983fde4f5da18564d344f077584a41bd01c WatchSource:0}: Error finding container 65ac90b89e64345bcd8a67621cb13983fde4f5da18564d344f077584a41bd01c: Status 404 returned error can't find the container with id 65ac90b89e64345bcd8a67621cb13983fde4f5da18564d344f077584a41bd01c Mar 20 15:28:50 crc kubenswrapper[3552]: I0320 15:28:50.280096 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcwv8" event={"ID":"95f52fb1-4064-4f8b-8031-fe8351bebc06","Type":"ContainerStarted","Data":"65ac90b89e64345bcd8a67621cb13983fde4f5da18564d344f077584a41bd01c"} Mar 20 15:28:50 crc kubenswrapper[3552]: I0320 15:28:50.281697 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pwxgv" event={"ID":"222b106d-1c3f-47e0-bf17-27ff566f7b95","Type":"ContainerStarted","Data":"e2d5391bc969106a2b89ac2c63b752cf7639e33d69ab57714e65c0accb60af9d"} Mar 20 15:28:50 crc kubenswrapper[3552]: I0320 15:28:50.739158 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-65476884b9-9wcvx" Mar 20 15:28:51 crc kubenswrapper[3552]: I0320 15:28:51.289699 3552 generic.go:334] "Generic (PLEG): container finished" podID="95f52fb1-4064-4f8b-8031-fe8351bebc06" containerID="0e43cd077fd03ceea0e8b856dcd533321db4c1f28464e1d6306bea892270151f" exitCode=0 Mar 20 15:28:51 crc kubenswrapper[3552]: I0320 15:28:51.289859 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcwv8" event={"ID":"95f52fb1-4064-4f8b-8031-fe8351bebc06","Type":"ContainerDied","Data":"0e43cd077fd03ceea0e8b856dcd533321db4c1f28464e1d6306bea892270151f"} Mar 20 15:28:51 crc kubenswrapper[3552]: I0320 15:28:51.292193 3552 generic.go:334] "Generic (PLEG): container finished" podID="222b106d-1c3f-47e0-bf17-27ff566f7b95" containerID="817dcfaba6fd9128d74c5c32c2c57c7fd5717ce40f36aac7c40809c6c7bfd629" exitCode=0 Mar 20 15:28:51 crc kubenswrapper[3552]: I0320 15:28:51.292228 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pwxgv" event={"ID":"222b106d-1c3f-47e0-bf17-27ff566f7b95","Type":"ContainerDied","Data":"817dcfaba6fd9128d74c5c32c2c57c7fd5717ce40f36aac7c40809c6c7bfd629"} Mar 20 15:28:52 crc kubenswrapper[3552]: I0320 15:28:52.302241 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcwv8" event={"ID":"95f52fb1-4064-4f8b-8031-fe8351bebc06","Type":"ContainerStarted","Data":"afb60dbce41a5ffa5c059acccbf3fceaf33e37aaba49060d268f25065e8e9552"} Mar 20 15:28:52 crc kubenswrapper[3552]: I0320 15:28:52.305885 3552 generic.go:334] "Generic (PLEG): container finished" podID="1d0b6cae-c446-4d8c-a8ae-41523c726770" containerID="3bc6af370a381837ec3c02dc34b0c325e13aba09c67e8c2f4e205a14e8c53772" exitCode=0 Mar 20 15:28:52 crc 
kubenswrapper[3552]: I0320 15:28:52.305947 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dm8n5" event={"ID":"1d0b6cae-c446-4d8c-a8ae-41523c726770","Type":"ContainerDied","Data":"3bc6af370a381837ec3c02dc34b0c325e13aba09c67e8c2f4e205a14e8c53772"} Mar 20 15:28:53 crc kubenswrapper[3552]: I0320 15:28:53.313018 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pwxgv" event={"ID":"222b106d-1c3f-47e0-bf17-27ff566f7b95","Type":"ContainerStarted","Data":"eb6bea5be0332a84b9740155210963ee7694e73b9b729a0fae78e89967d57b29"} Mar 20 15:28:54 crc kubenswrapper[3552]: I0320 15:28:54.322692 3552 generic.go:334] "Generic (PLEG): container finished" podID="95f52fb1-4064-4f8b-8031-fe8351bebc06" containerID="afb60dbce41a5ffa5c059acccbf3fceaf33e37aaba49060d268f25065e8e9552" exitCode=0 Mar 20 15:28:54 crc kubenswrapper[3552]: I0320 15:28:54.322843 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcwv8" event={"ID":"95f52fb1-4064-4f8b-8031-fe8351bebc06","Type":"ContainerDied","Data":"afb60dbce41a5ffa5c059acccbf3fceaf33e37aaba49060d268f25065e8e9552"} Mar 20 15:28:54 crc kubenswrapper[3552]: I0320 15:28:54.330675 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dm8n5" event={"ID":"1d0b6cae-c446-4d8c-a8ae-41523c726770","Type":"ContainerStarted","Data":"b600df55a16bf0d30079093856e348254ee9b9cbec8b4d292049440e88237216"} Mar 20 15:28:55 crc kubenswrapper[3552]: I0320 15:28:55.336275 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcwv8" event={"ID":"95f52fb1-4064-4f8b-8031-fe8351bebc06","Type":"ContainerStarted","Data":"3a93e95d0b76c7a171df33c8031776358f1448938c6ddd5ae91bfd65eb2e0058"} Mar 20 15:28:55 crc kubenswrapper[3552]: I0320 15:28:55.359842 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dm8n5" podStartSLOduration=6.010492575 podStartE2EDuration="10.359780902s" podCreationTimestamp="2026-03-20 15:28:45 +0000 UTC" firstStartedPulling="2026-03-20 15:28:48.268454844 +0000 UTC m=+227.962151704" lastFinishedPulling="2026-03-20 15:28:52.617743171 +0000 UTC m=+232.311440031" observedRunningTime="2026-03-20 15:28:55.355884211 +0000 UTC m=+235.049581041" watchObservedRunningTime="2026-03-20 15:28:55.359780902 +0000 UTC m=+235.053477732" Mar 20 15:28:55 crc kubenswrapper[3552]: I0320 15:28:55.368317 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:55 crc kubenswrapper[3552]: I0320 15:28:55.368372 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:28:56 crc kubenswrapper[3552]: I0320 15:28:56.361188 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dcwv8" podStartSLOduration=3.994442618 podStartE2EDuration="7.361122145s" podCreationTimestamp="2026-03-20 15:28:49 +0000 UTC" firstStartedPulling="2026-03-20 15:28:51.294397524 +0000 UTC m=+230.988094394" lastFinishedPulling="2026-03-20 15:28:54.661077081 +0000 UTC m=+234.354773921" observedRunningTime="2026-03-20 15:28:56.360813875 +0000 UTC m=+236.054510715" watchObservedRunningTime="2026-03-20 15:28:56.361122145 +0000 UTC m=+236.054818965" Mar 20 15:28:56 crc kubenswrapper[3552]: I0320 15:28:56.476792 
3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-dm8n5" podUID="1d0b6cae-c446-4d8c-a8ae-41523c726770" containerName="registry-server" probeResult="failure" output=< Mar 20 15:28:56 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:28:56 crc kubenswrapper[3552]: > Mar 20 15:28:59 crc kubenswrapper[3552]: I0320 15:28:59.286820 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bdhkj" Mar 20 15:28:59 crc kubenswrapper[3552]: I0320 15:28:59.968245 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:28:59 crc kubenswrapper[3552]: I0320 15:28:59.968849 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:29:00 crc kubenswrapper[3552]: I0320 15:29:00.106386 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:29:00 crc kubenswrapper[3552]: I0320 15:29:00.464362 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dcwv8" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.268911 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.268989 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.269015 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.269035 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.269048 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.430490 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.526262 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7\": container with ID starting with 25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7 not found: ID does not exist" containerID="25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.527531 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7" err="rpc error: code = NotFound desc = could not find container \"25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7\": container with ID starting with 25735c6e47161b86d7b2f78fbf3d3260e4aea2b2ced6944958b5ae5c856780f7 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.528096 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93\": container with ID starting with c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93 not found: ID does not exist" containerID="c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.528122 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93" err="rpc error: code = NotFound desc = could not find container \"c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93\": container with ID starting with c09aecd4ac0907db3690105bde94f4f2d7f2afdb984b525adfcd1e3adb135e93 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.528584 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d\": container with ID starting with 20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d not found: ID does not exist" containerID="20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.528618 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d" err="rpc error: code = NotFound desc = could not find container \"20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d\": container with ID starting with 20ae5abf6a16e184e1a782e145004596ca5fe22ef8a62870b61a95e4ded2747d not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.529960 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc\": container with ID starting with 012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc not found: ID does not exist" containerID="012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.529995 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus 
for containerID" containerID="012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc" err="rpc error: code = NotFound desc = could not find container \"012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc\": container with ID starting with 012545d653736c51f34a06b810ad03de9dc45c3c010b2253613fba8e718b5bcc not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.530425 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4\": container with ID starting with 1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4 not found: ID does not exist" containerID="1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.530465 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4" err="rpc error: code = NotFound desc = could not find container \"1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4\": container with ID starting with 1350206b1e30b3971ec7ac87abd91e619adf5d773c6d44c2e877b2389a2d2df4 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.530806 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306\": container with ID starting with 04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306 not found: ID does not exist" containerID="04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.530832 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306" err="rpc error: code = NotFound desc = could not find container \"04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306\": container with ID starting with 04f39dd6c26123e98822fcebd4217e741bcca557be4b20b4979b129cb08b5306 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.532981 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f\": container with ID starting with 526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f not found: ID does not exist" containerID="526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.533064 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f" err="rpc error: code = NotFound desc = could not find container \"526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f\": container with ID starting with 526bc0068d7120eca1c49e6c9cd46b0806e029cdfe69bc77d80e63ed2824621f not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.533728 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904\": container with ID starting with 
a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904 not found: ID does not exist" containerID="a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.533766 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904" err="rpc error: code = NotFound desc = could not find container \"a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904\": container with ID starting with a27c87b9d55165e64d2385933341458119a430f62bc5cf9d47ba6ab21105c904 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.535519 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f\": container with ID starting with 29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f not found: ID does not exist" containerID="29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.535553 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f" err="rpc error: code = NotFound desc = could not find container \"29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f\": container with ID starting with 29dfaac929b171d141ae69ea87c918ed3428a69fda4d2437de98861acc61690f not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.535954 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755\": container with ID starting with 1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755 not found: ID does not exist" containerID="1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.536016 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755" err="rpc error: code = NotFound desc = could not find container \"1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755\": container with ID starting with 1b91a900f525490bb12fdd8c1748a9d6b4a5be32d59cf6d5e0dee827d802b755 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.536437 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8\": container with ID starting with 1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8 not found: ID does not exist" containerID="1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.536466 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8" err="rpc error: code = NotFound desc = could not find container \"1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8\": container with ID starting with 1bf0b76ea20ce684ae71de3b2a9e97f4a75368ae01786073dc5a7baecf9f5bb8 not found: ID does not exist" Mar 20 15:29:01 crc 
kubenswrapper[3552]: E0320 15:29:01.536923 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810\": container with ID starting with e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810 not found: ID does not exist" containerID="e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.536987 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810" err="rpc error: code = NotFound desc = could not find container \"e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810\": container with ID starting with e59973dbc153e0b5bf585569894470db8d3a36e38c6bddaece34b0e6286f1810 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.537532 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29\": container with ID starting with 5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29 not found: ID does not exist" containerID="5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.537591 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29" err="rpc error: code = NotFound desc = could not find container \"5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29\": container with ID starting with 5bb72871ca468f5b2c44bf30403293b46fe4669408d8b3f88701e5ed96696e29 not found: ID does not exist" Mar 20 15:29:01 crc kubenswrapper[3552]: E0320 15:29:01.538009 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61\": container with ID starting with e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61 not found: ID does not exist" containerID="e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61" Mar 20 15:29:01 crc kubenswrapper[3552]: I0320 15:29:01.538038 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61" err="rpc error: code = NotFound desc = could not find container \"e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61\": container with ID starting with e812332dccab02b78fcc94f2e8449c1be956f6102239b89d17a287bfa877fe61 not found: ID does not exist" Mar 20 15:29:05 crc kubenswrapper[3552]: I0320 15:29:05.489807 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:29:05 crc kubenswrapper[3552]: I0320 15:29:05.607071 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dm8n5" Mar 20 15:29:25 crc kubenswrapper[3552]: I0320 15:29:25.494458 3552 generic.go:334] "Generic (PLEG): container finished" podID="222b106d-1c3f-47e0-bf17-27ff566f7b95" containerID="eb6bea5be0332a84b9740155210963ee7694e73b9b729a0fae78e89967d57b29" exitCode=0 Mar 20 15:29:25 crc kubenswrapper[3552]: I0320 15:29:25.494585 3552 
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pwxgv" event={"ID":"222b106d-1c3f-47e0-bf17-27ff566f7b95","Type":"ContainerDied","Data":"eb6bea5be0332a84b9740155210963ee7694e73b9b729a0fae78e89967d57b29"} Mar 20 15:29:28 crc kubenswrapper[3552]: I0320 15:29:28.514819 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pwxgv" event={"ID":"222b106d-1c3f-47e0-bf17-27ff566f7b95","Type":"ContainerStarted","Data":"9626d5e54faf47c310e9372de44a6029973197aee3ddfc762739ab67cb712862"} Mar 20 15:29:28 crc kubenswrapper[3552]: I0320 15:29:28.547015 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pwxgv" podStartSLOduration=4.987911425 podStartE2EDuration="39.546870303s" podCreationTimestamp="2026-03-20 15:28:49 +0000 UTC" firstStartedPulling="2026-03-20 15:28:51.293691222 +0000 UTC m=+230.987388052" lastFinishedPulling="2026-03-20 15:29:25.85265006 +0000 UTC m=+265.546346930" observedRunningTime="2026-03-20 15:29:28.542787433 +0000 UTC m=+268.236484333" watchObservedRunningTime="2026-03-20 15:29:28.546870303 +0000 UTC m=+268.240567173" Mar 20 15:29:29 crc kubenswrapper[3552]: I0320 15:29:29.775640 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:29:29 crc kubenswrapper[3552]: I0320 15:29:29.775688 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:29:30 crc kubenswrapper[3552]: I0320 15:29:30.896153 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pwxgv" podUID="222b106d-1c3f-47e0-bf17-27ff566f7b95" containerName="registry-server" probeResult="failure" output=< Mar 20 15:29:30 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:29:30 crc kubenswrapper[3552]: > Mar 20 15:29:32 crc kubenswrapper[3552]: I0320 15:29:32.308016 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:29:32 crc kubenswrapper[3552]: I0320 15:29:32.313141 3552 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
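
The csi_attacher entry above ("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...") records the kubelet gating the two-phase CSI mount: NodeStageVolume ("MountDevice") runs only when the node plugin advertises the STAGE_UNSTAGE_VOLUME capability; otherwise the volume proceeds straight to NodePublishVolume ("SetUp"). That is why the entries that follow report MountVolume.MountDevice "succeeded" as an effective no-op and then a normal MountVolume.SetUp for pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97. The Go sketch below illustrates that branch under simplified assumptions; the nodePlugin interface and hostpathLike type are illustrative stand-ins, not kubelet's actual types.

package main

import "fmt"

// nodePlugin is a simplified stand-in for a CSI node service; only the
// capability query and the two mount phases matter for this sketch.
type nodePlugin interface {
	HasStageUnstage() bool                     // does NodeGetCapabilities report STAGE_UNSTAGE_VOLUME?
	Stage(volumeID, stagingPath string) error  // NodeStageVolume ("MountDevice")
	Publish(volumeID, targetPath string) error // NodePublishVolume ("SetUp")
}

// mountVolume mirrors the decision logged above: stage only when the plugin
// advertises STAGE_UNSTAGE_VOLUME, then always publish into the pod.
func mountVolume(p nodePlugin, volumeID, stagingPath, targetPath string) error {
	if !p.HasStageUnstage() {
		fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
	} else if err := p.Stage(volumeID, stagingPath); err != nil {
		return err
	}
	return p.Publish(volumeID, targetPath)
}

// hostpathLike mimics a plugin such as kubevirt.io.hostpath-provisioner,
// which, per the log above, does not advertise STAGE_UNSTAGE_VOLUME.
type hostpathLike struct{}

func (hostpathLike) HasStageUnstage() bool      { return false }
func (hostpathLike) Stage(string, string) error { return nil }
func (hostpathLike) Publish(id, target string) error {
	fmt.Printf("MountVolume.SetUp succeeded for volume %q at %s\n", id, target)
	return nil
}

func main() {
	// Volume ID taken from the log; staging and target paths are illustrative.
	_ = mountVolume(hostpathLike{},
		"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97",
		"/tmp/staging", "/tmp/target")
}

For a plugin that does advertise the capability, the same helper would call Stage first, matching clusters where MountDevice performs a real format-and-mount before SetUp.
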
Mar 20 15:29:32 crc kubenswrapper[3552]: I0320 15:29:32.313271 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ea5f9a7192af1960ec8c50a86fd2d9a756dbf85695798868f611e04a03ec009/globalmount\"" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:29:32 crc kubenswrapper[3552]: I0320 15:29:32.344136 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-6fbd648f87-j4bk5\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:29:32 crc kubenswrapper[3552]: I0320 15:29:32.635930 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-q786x" Mar 20 15:29:32 crc kubenswrapper[3552]: I0320 15:29:32.644574 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:29:33 crc kubenswrapper[3552]: I0320 15:29:33.541278 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" event={"ID":"5cad292d-912c-4787-a5fa-0ade98e731eb","Type":"ContainerStarted","Data":"3dd7e3cc6f6f95cf014df3ea8927aa60e26225717ee25c25df92358324240066"} Mar 20 15:29:33 crc kubenswrapper[3552]: I0320 15:29:33.541890 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" event={"ID":"5cad292d-912c-4787-a5fa-0ade98e731eb","Type":"ContainerStarted","Data":"04d81141f50ec91b428f3968a6eee97a15d5e072023396fd4710ee4823a71e3b"} Mar 20 15:29:34 crc kubenswrapper[3552]: I0320 15:29:34.547288 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:29:37 crc kubenswrapper[3552]: I0320 15:29:37.569246 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/3.log" Mar 20 15:29:37 crc kubenswrapper[3552]: I0320 15:29:37.569626 3552 generic.go:334] "Generic (PLEG): container finished" podID="7d51f445-054a-4e4f-a67b-a828f5a32511" containerID="fb415465a9a1ff924c4c807f00ce88dd80be6b5796e10869b670a7a0d29ed1dd" exitCode=1 Mar 20 15:29:37 crc kubenswrapper[3552]: I0320 15:29:37.569676 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerDied","Data":"fb415465a9a1ff924c4c807f00ce88dd80be6b5796e10869b670a7a0d29ed1dd"} Mar 20 15:29:37 crc kubenswrapper[3552]: I0320 15:29:37.570256 3552 scope.go:117] "RemoveContainer" containerID="fb415465a9a1ff924c4c807f00ce88dd80be6b5796e10869b670a7a0d29ed1dd" Mar 20 15:29:38 crc kubenswrapper[3552]: I0320 15:29:38.576231 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-ingress-operator_ingress-operator-7d46d5bb6d-rrg6t_7d51f445-054a-4e4f-a67b-a828f5a32511/ingress-operator/3.log" Mar 20 15:29:38 crc kubenswrapper[3552]: I0320 15:29:38.576286 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-7d46d5bb6d-rrg6t" event={"ID":"7d51f445-054a-4e4f-a67b-a828f5a32511","Type":"ContainerStarted","Data":"5a42a72c07599f8c9eb026d0d86faa99bed3d02a756ec1c5ba3f7c19ed7e91d6"} Mar 20 15:29:39 crc kubenswrapper[3552]: I0320 15:29:39.876620 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:29:40 crc kubenswrapper[3552]: I0320 15:29:40.009221 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pwxgv" Mar 20 15:29:52 crc kubenswrapper[3552]: I0320 15:29:52.653086 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.161737 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55"] Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.162548 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6ecabf18-01fa-4357-8042-dc7fcb56e5ee" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.163131 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.167091 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.167314 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.180547 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55"] Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.275551 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-config-volume\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.275819 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-secret-volume\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.275905 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ckmv\" (UniqueName: \"kubernetes.io/projected/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-kube-api-access-2ckmv\") pod \"collect-profiles-29567010-zqp55\" (UID: 
\"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.377312 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-secret-volume\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.377372 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2ckmv\" (UniqueName: \"kubernetes.io/projected/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-kube-api-access-2ckmv\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.377432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-config-volume\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.378172 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-config-volume\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.401932 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-secret-volume\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.408130 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ckmv\" (UniqueName: \"kubernetes.io/projected/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-kube-api-access-2ckmv\") pod \"collect-profiles-29567010-zqp55\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.489850 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:00 crc kubenswrapper[3552]: I0320 15:30:00.714321 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55"] Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.269433 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.269874 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.269900 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.269920 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.269954 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.695484 3552 generic.go:334] "Generic (PLEG): container finished" podID="6ecabf18-01fa-4357-8042-dc7fcb56e5ee" containerID="10a41eed047daf8ffb05cea26fd3200eecb45c1c5fc574acdb0ba75711e07896" exitCode=0 Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.695759 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" event={"ID":"6ecabf18-01fa-4357-8042-dc7fcb56e5ee","Type":"ContainerDied","Data":"10a41eed047daf8ffb05cea26fd3200eecb45c1c5fc574acdb0ba75711e07896"} Mar 20 15:30:01 crc kubenswrapper[3552]: I0320 15:30:01.696544 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" event={"ID":"6ecabf18-01fa-4357-8042-dc7fcb56e5ee","Type":"ContainerStarted","Data":"87adfae85b7dbe7b13e6083eb1a1cd99ba7ca30dc98ef8c4512f8a475c19d6c2"} Mar 20 15:30:02 crc kubenswrapper[3552]: I0320 15:30:02.885137 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.008440 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-secret-volume\") pod \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.008592 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ckmv\" (UniqueName: \"kubernetes.io/projected/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-kube-api-access-2ckmv\") pod \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.008642 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-config-volume\") pod \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\" (UID: \"6ecabf18-01fa-4357-8042-dc7fcb56e5ee\") " Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.009980 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-config-volume" (OuterVolumeSpecName: "config-volume") pod "6ecabf18-01fa-4357-8042-dc7fcb56e5ee" (UID: "6ecabf18-01fa-4357-8042-dc7fcb56e5ee"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.015004 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-kube-api-access-2ckmv" (OuterVolumeSpecName: "kube-api-access-2ckmv") pod "6ecabf18-01fa-4357-8042-dc7fcb56e5ee" (UID: "6ecabf18-01fa-4357-8042-dc7fcb56e5ee"). InnerVolumeSpecName "kube-api-access-2ckmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.015512 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6ecabf18-01fa-4357-8042-dc7fcb56e5ee" (UID: "6ecabf18-01fa-4357-8042-dc7fcb56e5ee"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.110743 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-secret-volume\") on node \"crc\" DevicePath \"\"" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.110793 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2ckmv\" (UniqueName: \"kubernetes.io/projected/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-kube-api-access-2ckmv\") on node \"crc\" DevicePath \"\"" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.110809 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6ecabf18-01fa-4357-8042-dc7fcb56e5ee-config-volume\") on node \"crc\" DevicePath \"\"" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.709555 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" event={"ID":"6ecabf18-01fa-4357-8042-dc7fcb56e5ee","Type":"ContainerDied","Data":"87adfae85b7dbe7b13e6083eb1a1cd99ba7ca30dc98ef8c4512f8a475c19d6c2"} Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.709590 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87adfae85b7dbe7b13e6083eb1a1cd99ba7ca30dc98ef8c4512f8a475c19d6c2" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.709649 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55" Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.967367 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"] Mar 20 15:30:03 crc kubenswrapper[3552]: I0320 15:30:03.972913 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555400-kb5zp"] Mar 20 15:30:05 crc kubenswrapper[3552]: I0320 15:30:05.460286 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="079b7b69-b036-48d0-ab94-f3d1e03777f9" path="/var/lib/kubelet/pods/079b7b69-b036-48d0-ab94-f3d1e03777f9/volumes" Mar 20 15:31:01 crc kubenswrapper[3552]: I0320 15:31:01.270776 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:31:01 crc kubenswrapper[3552]: I0320 15:31:01.271467 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:31:01 crc kubenswrapper[3552]: I0320 15:31:01.271498 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:31:01 crc kubenswrapper[3552]: I0320 15:31:01.271545 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:31:01 crc kubenswrapper[3552]: I0320 15:31:01.271590 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:31:01 crc kubenswrapper[3552]: E0320 15:31:01.558085 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde\": container with ID starting with 
8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde not found: ID does not exist" containerID="8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde" Mar 20 15:31:01 crc kubenswrapper[3552]: I0320 15:31:01.558121 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde" err="rpc error: code = NotFound desc = could not find container \"8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde\": container with ID starting with 8386fd16df2c336ce1bbb3d67595c6e4f09149d7e9aed88a15edab61a5d2bcde not found: ID does not exist" Mar 20 15:31:12 crc kubenswrapper[3552]: I0320 15:31:12.778729 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:31:12 crc kubenswrapper[3552]: I0320 15:31:12.779447 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.018640 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-hs5kp"] Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.018987 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" podNamespace="openshift-multus" podName="cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: E0320 15:31:15.019118 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6ecabf18-01fa-4357-8042-dc7fcb56e5ee" containerName="collect-profiles" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.019129 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ecabf18-01fa-4357-8042-dc7fcb56e5ee" containerName="collect-profiles" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.019220 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ecabf18-01fa-4357-8042-dc7fcb56e5ee" containerName="collect-profiles" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.019561 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.021928 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-sysctl-allowlist" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.037252 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-smth4" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.181463 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/2ea91057-268f-4992-b13a-5915cea53ae3-ready\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.181582 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2ea91057-268f-4992-b13a-5915cea53ae3-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.181719 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tllm\" (UniqueName: \"kubernetes.io/projected/2ea91057-268f-4992-b13a-5915cea53ae3-kube-api-access-6tllm\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.181765 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2ea91057-268f-4992-b13a-5915cea53ae3-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283167 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/2ea91057-268f-4992-b13a-5915cea53ae3-ready\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283264 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2ea91057-268f-4992-b13a-5915cea53ae3-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283319 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6tllm\" (UniqueName: \"kubernetes.io/projected/2ea91057-268f-4992-b13a-5915cea53ae3-kube-api-access-6tllm\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283352 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/2ea91057-268f-4992-b13a-5915cea53ae3-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283569 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/2ea91057-268f-4992-b13a-5915cea53ae3-ready\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283575 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2ea91057-268f-4992-b13a-5915cea53ae3-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.283983 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2ea91057-268f-4992-b13a-5915cea53ae3-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.308019 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tllm\" (UniqueName: \"kubernetes.io/projected/2ea91057-268f-4992-b13a-5915cea53ae3-kube-api-access-6tllm\") pod \"cni-sysctl-allowlist-ds-hs5kp\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:15 crc kubenswrapper[3552]: I0320 15:31:15.338222 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:16 crc kubenswrapper[3552]: I0320 15:31:16.106293 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" event={"ID":"2ea91057-268f-4992-b13a-5915cea53ae3","Type":"ContainerStarted","Data":"351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132"} Mar 20 15:31:16 crc kubenswrapper[3552]: I0320 15:31:16.106697 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" event={"ID":"2ea91057-268f-4992-b13a-5915cea53ae3","Type":"ContainerStarted","Data":"b7b6adf33d0f786147f9db092cc354fa7578b0694aff629defdf235a8487afd5"} Mar 20 15:31:16 crc kubenswrapper[3552]: I0320 15:31:16.107057 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:16 crc kubenswrapper[3552]: I0320 15:31:16.129844 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" podStartSLOduration=2.129790439 podStartE2EDuration="2.129790439s" podCreationTimestamp="2026-03-20 15:31:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:31:16.125225309 +0000 UTC m=+375.818922139" watchObservedRunningTime="2026-03-20 15:31:16.129790439 +0000 UTC m=+375.823487269" Mar 20 15:31:16 crc kubenswrapper[3552]: I0320 15:31:16.161058 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:17 crc kubenswrapper[3552]: I0320 15:31:17.025635 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-hs5kp"] Mar 20 15:31:18 crc kubenswrapper[3552]: I0320 15:31:18.115389 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" containerID="cri-o://351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" gracePeriod=30 Mar 20 15:31:25 crc kubenswrapper[3552]: E0320 15:31:25.342670 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:25 crc kubenswrapper[3552]: E0320 15:31:25.345912 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:25 crc kubenswrapper[3552]: E0320 15:31:25.348707 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:25 crc kubenswrapper[3552]: E0320 15:31:25.348814 3552 prober.go:104] "Probe errored" err="rpc error: code = 
Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" Mar 20 15:31:35 crc kubenswrapper[3552]: E0320 15:31:35.343097 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:35 crc kubenswrapper[3552]: E0320 15:31:35.345825 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:35 crc kubenswrapper[3552]: E0320 15:31:35.348482 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:35 crc kubenswrapper[3552]: E0320 15:31:35.348547 3552 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.716233 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66994f7557-dmsqj"] Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.716660 3552 topology_manager.go:215] "Topology Admit Handler" podUID="db2f5d66-738c-46c8-9aaa-dc8bfb356d2e" podNamespace="openshift-image-registry" podName="image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.717244 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.740969 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66994f7557-dmsqj"] Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829557 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-bound-sa-token\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829620 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-registry-certificates\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829646 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqhnw\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-kube-api-access-rqhnw\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829695 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-registry-tls\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829730 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-installation-pull-secrets\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829767 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829808 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-trusted-ca\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.829838 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-ca-trust-extracted\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.860146 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.930815 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-registry-tls\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.930921 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-installation-pull-secrets\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.930988 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-trusted-ca\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.931035 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-ca-trust-extracted\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.931123 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-bound-sa-token\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.931162 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-registry-certificates\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.931204 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rqhnw\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-kube-api-access-rqhnw\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " 
pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.933691 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-trusted-ca\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.934143 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-ca-trust-extracted\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.935077 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-registry-certificates\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.939166 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-registry-tls\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.940158 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-installation-pull-secrets\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.962485 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-bound-sa-token\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:36 crc kubenswrapper[3552]: I0320 15:31:36.965647 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqhnw\" (UniqueName: \"kubernetes.io/projected/db2f5d66-738c-46c8-9aaa-dc8bfb356d2e-kube-api-access-rqhnw\") pod \"image-registry-66994f7557-dmsqj\" (UID: \"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e\") " pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:37 crc kubenswrapper[3552]: I0320 15:31:37.041054 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:37 crc kubenswrapper[3552]: I0320 15:31:37.286988 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66994f7557-dmsqj"] Mar 20 15:31:38 crc kubenswrapper[3552]: I0320 15:31:38.238241 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" event={"ID":"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e","Type":"ContainerStarted","Data":"174d8f5bf957e38950028d455eb951a9e07bfdd28963fa2794d74b5929288496"} Mar 20 15:31:38 crc kubenswrapper[3552]: I0320 15:31:38.238282 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" event={"ID":"db2f5d66-738c-46c8-9aaa-dc8bfb356d2e","Type":"ContainerStarted","Data":"0af63ca6907ac20edcb9292d9fabdbc499800bd485bc48cad9ad21e8e3814cb4"} Mar 20 15:31:38 crc kubenswrapper[3552]: I0320 15:31:38.238655 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:38 crc kubenswrapper[3552]: I0320 15:31:38.262834 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" podStartSLOduration=2.262780921 podStartE2EDuration="2.262780921s" podCreationTimestamp="2026-03-20 15:31:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:31:38.259856271 +0000 UTC m=+397.953553111" watchObservedRunningTime="2026-03-20 15:31:38.262780921 +0000 UTC m=+397.956477761" Mar 20 15:31:42 crc kubenswrapper[3552]: I0320 15:31:42.778345 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:31:42 crc kubenswrapper[3552]: I0320 15:31:42.779034 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:31:45 crc kubenswrapper[3552]: E0320 15:31:45.341221 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:45 crc kubenswrapper[3552]: E0320 15:31:45.343368 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:45 crc kubenswrapper[3552]: E0320 15:31:45.346078 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" cmd=["/bin/bash","-c","test -f /ready/ready"] Mar 20 15:31:45 crc kubenswrapper[3552]: E0320 15:31:45.346202 3552 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.324105 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-hs5kp_2ea91057-268f-4992-b13a-5915cea53ae3/kube-multus-additional-cni-plugins/0.log" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.324230 3552 generic.go:334] "Generic (PLEG): container finished" podID="2ea91057-268f-4992-b13a-5915cea53ae3" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" exitCode=137 Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.324282 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" event={"ID":"2ea91057-268f-4992-b13a-5915cea53ae3","Type":"ContainerDied","Data":"351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132"} Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.759813 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-hs5kp_2ea91057-268f-4992-b13a-5915cea53ae3/kube-multus-additional-cni-plugins/0.log" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.760133 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.917663 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2ea91057-268f-4992-b13a-5915cea53ae3-cni-sysctl-allowlist\") pod \"2ea91057-268f-4992-b13a-5915cea53ae3\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.917741 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2ea91057-268f-4992-b13a-5915cea53ae3-tuning-conf-dir\") pod \"2ea91057-268f-4992-b13a-5915cea53ae3\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.917799 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tllm\" (UniqueName: \"kubernetes.io/projected/2ea91057-268f-4992-b13a-5915cea53ae3-kube-api-access-6tllm\") pod \"2ea91057-268f-4992-b13a-5915cea53ae3\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.917867 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/2ea91057-268f-4992-b13a-5915cea53ae3-ready\") pod \"2ea91057-268f-4992-b13a-5915cea53ae3\" (UID: \"2ea91057-268f-4992-b13a-5915cea53ae3\") " Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.918331 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ea91057-268f-4992-b13a-5915cea53ae3-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "2ea91057-268f-4992-b13a-5915cea53ae3" 
(UID: "2ea91057-268f-4992-b13a-5915cea53ae3"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.919494 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea91057-268f-4992-b13a-5915cea53ae3-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "2ea91057-268f-4992-b13a-5915cea53ae3" (UID: "2ea91057-268f-4992-b13a-5915cea53ae3"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.919917 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ea91057-268f-4992-b13a-5915cea53ae3-ready" (OuterVolumeSpecName: "ready") pod "2ea91057-268f-4992-b13a-5915cea53ae3" (UID: "2ea91057-268f-4992-b13a-5915cea53ae3"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:31:48 crc kubenswrapper[3552]: I0320 15:31:48.927457 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ea91057-268f-4992-b13a-5915cea53ae3-kube-api-access-6tllm" (OuterVolumeSpecName: "kube-api-access-6tllm") pod "2ea91057-268f-4992-b13a-5915cea53ae3" (UID: "2ea91057-268f-4992-b13a-5915cea53ae3"). InnerVolumeSpecName "kube-api-access-6tllm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.020110 3552 reconciler_common.go:300] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2ea91057-268f-4992-b13a-5915cea53ae3-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.020176 3552 reconciler_common.go:300] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2ea91057-268f-4992-b13a-5915cea53ae3-tuning-conf-dir\") on node \"crc\" DevicePath \"\"" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.020200 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-6tllm\" (UniqueName: \"kubernetes.io/projected/2ea91057-268f-4992-b13a-5915cea53ae3-kube-api-access-6tllm\") on node \"crc\" DevicePath \"\"" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.020228 3552 reconciler_common.go:300] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/2ea91057-268f-4992-b13a-5915cea53ae3-ready\") on node \"crc\" DevicePath \"\"" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.330542 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-hs5kp_2ea91057-268f-4992-b13a-5915cea53ae3/kube-multus-additional-cni-plugins/0.log" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.330634 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" event={"ID":"2ea91057-268f-4992-b13a-5915cea53ae3","Type":"ContainerDied","Data":"b7b6adf33d0f786147f9db092cc354fa7578b0694aff629defdf235a8487afd5"} Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.330673 3552 scope.go:117] "RemoveContainer" containerID="351f9397ea78d09d8f4b5888e196f4b33bc769e298b047436035de5347b44132" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.330706 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-hs5kp" Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.372494 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-hs5kp"] Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.374945 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-hs5kp"] Mar 20 15:31:49 crc kubenswrapper[3552]: I0320 15:31:49.437718 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" path="/var/lib/kubelet/pods/2ea91057-268f-4992-b13a-5915cea53ae3/volumes" Mar 20 15:31:57 crc kubenswrapper[3552]: I0320 15:31:57.050872 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66994f7557-dmsqj" Mar 20 15:31:57 crc kubenswrapper[3552]: I0320 15:31:57.151970 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-6fbd648f87-j4bk5"] Mar 20 15:32:01 crc kubenswrapper[3552]: I0320 15:32:01.272567 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:32:01 crc kubenswrapper[3552]: I0320 15:32:01.273070 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:32:01 crc kubenswrapper[3552]: I0320 15:32:01.273108 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:32:01 crc kubenswrapper[3552]: I0320 15:32:01.273197 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:32:01 crc kubenswrapper[3552]: I0320 15:32:01.273248 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:32:12 crc kubenswrapper[3552]: I0320 15:32:12.778782 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:32:12 crc kubenswrapper[3552]: I0320 15:32:12.779592 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:32:12 crc kubenswrapper[3552]: I0320 15:32:12.779652 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:32:12 crc kubenswrapper[3552]: I0320 15:32:12.780741 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b0e20f7a5795a7032ac82d7a882b848725b321b9b7d2af58b2d9df46f95bd747"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:32:12 crc kubenswrapper[3552]: I0320 15:32:12.781060 3552 kuberuntime_container.go:770] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://b0e20f7a5795a7032ac82d7a882b848725b321b9b7d2af58b2d9df46f95bd747" gracePeriod=600 Mar 20 15:32:13 crc kubenswrapper[3552]: I0320 15:32:13.481834 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="b0e20f7a5795a7032ac82d7a882b848725b321b9b7d2af58b2d9df46f95bd747" exitCode=0 Mar 20 15:32:13 crc kubenswrapper[3552]: I0320 15:32:13.481912 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"b0e20f7a5795a7032ac82d7a882b848725b321b9b7d2af58b2d9df46f95bd747"} Mar 20 15:32:13 crc kubenswrapper[3552]: I0320 15:32:13.482568 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"a6c34bfc12c2223d8144efc535af88e195af690ac65b1064e138b70ee20351af"} Mar 20 15:32:13 crc kubenswrapper[3552]: I0320 15:32:13.482609 3552 scope.go:117] "RemoveContainer" containerID="6ca39e978d01eda74942a426f0604e4d4f5e9ca91b0d2821a78787798a318e64" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.278534 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" containerName="registry" containerID="cri-o://3dd7e3cc6f6f95cf014df3ea8927aa60e26225717ee25c25df92358324240066" gracePeriod=30 Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.547014 3552 generic.go:334] "Generic (PLEG): container finished" podID="5cad292d-912c-4787-a5fa-0ade98e731eb" containerID="3dd7e3cc6f6f95cf014df3ea8927aa60e26225717ee25c25df92358324240066" exitCode=0 Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.547066 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" event={"ID":"5cad292d-912c-4787-a5fa-0ade98e731eb","Type":"ContainerDied","Data":"3dd7e3cc6f6f95cf014df3ea8927aa60e26225717ee25c25df92358324240066"} Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.700038 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812243 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812318 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812343 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812392 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812440 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812464 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812830 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.812861 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") pod \"5cad292d-912c-4787-a5fa-0ade98e731eb\" (UID: \"5cad292d-912c-4787-a5fa-0ade98e731eb\") " Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.814731 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.814781 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.816617 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.822151 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.823510 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.823725 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.827825 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97" (OuterVolumeSpecName: "registry-storage") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "pvc-f5d86efc-9248-4b55-9b8b-23cf63fe9e97". PluginName "kubernetes.io/csi", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.835143 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m" (OuterVolumeSpecName: "kube-api-access-b568m") pod "5cad292d-912c-4787-a5fa-0ade98e731eb" (UID: "5cad292d-912c-4787-a5fa-0ade98e731eb"). InnerVolumeSpecName "kube-api-access-b568m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914514 3552 reconciler_common.go:300] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5cad292d-912c-4787-a5fa-0ade98e731eb-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914581 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-b568m\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-kube-api-access-b568m\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914594 3552 reconciler_common.go:300] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-certificates\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914606 3552 reconciler_common.go:300] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5cad292d-912c-4787-a5fa-0ade98e731eb-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914618 3552 reconciler_common.go:300] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-bound-sa-token\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914633 3552 reconciler_common.go:300] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5cad292d-912c-4787-a5fa-0ade98e731eb-trusted-ca\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:22 crc kubenswrapper[3552]: I0320 15:32:22.914643 3552 reconciler_common.go:300] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5cad292d-912c-4787-a5fa-0ade98e731eb-registry-tls\") on node \"crc\" DevicePath \"\"" Mar 20 15:32:23 crc kubenswrapper[3552]: I0320 15:32:23.554167 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" event={"ID":"5cad292d-912c-4787-a5fa-0ade98e731eb","Type":"ContainerDied","Data":"04d81141f50ec91b428f3968a6eee97a15d5e072023396fd4710ee4823a71e3b"} Mar 20 15:32:23 crc kubenswrapper[3552]: I0320 15:32:23.554220 3552 scope.go:117] "RemoveContainer" containerID="3dd7e3cc6f6f95cf014df3ea8927aa60e26225717ee25c25df92358324240066" Mar 20 15:32:23 crc kubenswrapper[3552]: I0320 15:32:23.554257 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" Mar 20 15:32:23 crc kubenswrapper[3552]: I0320 15:32:23.614809 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-6fbd648f87-j4bk5"] Mar 20 15:32:23 crc kubenswrapper[3552]: I0320 15:32:23.623536 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-6fbd648f87-j4bk5"] Mar 20 15:32:25 crc kubenswrapper[3552]: I0320 15:32:25.443949 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" path="/var/lib/kubelet/pods/5cad292d-912c-4787-a5fa-0ade98e731eb/volumes" Mar 20 15:32:25 crc kubenswrapper[3552]: I0320 15:32:25.512640 3552 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 20 15:32:27 crc kubenswrapper[3552]: I0320 15:32:27.651300 3552 patch_prober.go:28] interesting pod/image-registry-6fbd648f87-j4bk5 container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.65:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Mar 20 15:32:27 crc kubenswrapper[3552]: I0320 15:32:27.651951 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-6fbd648f87-j4bk5" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.65:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Mar 20 15:32:46 crc kubenswrapper[3552]: I0320 15:32:46.374837 3552 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 20 15:32:47 crc kubenswrapper[3552]: I0320 15:32:47.310769 3552 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 20 15:33:01 crc kubenswrapper[3552]: I0320 15:33:01.274134 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:33:01 crc kubenswrapper[3552]: I0320 15:33:01.274985 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:33:01 crc kubenswrapper[3552]: I0320 15:33:01.275022 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:33:01 crc kubenswrapper[3552]: I0320 15:33:01.275072 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:33:01 crc kubenswrapper[3552]: I0320 15:33:01.275122 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:33:01 crc kubenswrapper[3552]: E0320 15:33:01.620678 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f\": container with 
ID starting with 11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f not found: ID does not exist" containerID="11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f" Mar 20 15:33:01 crc kubenswrapper[3552]: I0320 15:33:01.620756 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f" err="rpc error: code = NotFound desc = could not find container \"11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f\": container with ID starting with 11fa51f494704491466f5d88675e2af85f19ac7374dd4fcd6718fc27bf25cd8f not found: ID does not exist" Mar 20 15:34:01 crc kubenswrapper[3552]: I0320 15:34:01.275350 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:34:01 crc kubenswrapper[3552]: I0320 15:34:01.276010 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:34:01 crc kubenswrapper[3552]: I0320 15:34:01.276038 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:34:01 crc kubenswrapper[3552]: I0320 15:34:01.276080 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:34:01 crc kubenswrapper[3552]: I0320 15:34:01.276115 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.245073 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-676dd9bd64-gk2js"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.245574 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8527c5a5-b8f1-4156-bb87-ebca589bf2ab" podNamespace="cert-manager" podName="cert-manager-cainjector-676dd9bd64-gk2js" Mar 20 15:34:18 crc kubenswrapper[3552]: E0320 15:34:18.245706 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.245779 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" Mar 20 15:34:18 crc kubenswrapper[3552]: E0320 15:34:18.245790 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" containerName="registry" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.245797 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" containerName="registry" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.245910 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cad292d-912c-4787-a5fa-0ade98e731eb" containerName="registry" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.245935 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ea91057-268f-4992-b13a-5915cea53ae3" containerName="kube-multus-additional-cni-plugins" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.246244 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.248991 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.249089 3552 reflector.go:351] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-rjw5w" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.249204 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.257638 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-758df9885c-2km6z"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.257724 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4581d21f-88be-409f-8105-be49568258e0" podNamespace="cert-manager" podName="cert-manager-758df9885c-2km6z" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.258216 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-758df9885c-2km6z" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.260294 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-855f577f79-r4z4f"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.260345 3552 topology_manager.go:215] "Topology Admit Handler" podUID="25fd0780-accd-456f-8df8-206823b15fe7" podNamespace="cert-manager" podName="cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.260807 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.262417 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-676dd9bd64-gk2js"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.267757 3552 reflector.go:351] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-rmq8m" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.270820 3552 reflector.go:351] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-nrgz2" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.277882 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-855f577f79-r4z4f"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.286433 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-758df9885c-2km6z"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.398536 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cqfh\" (UniqueName: \"kubernetes.io/projected/4581d21f-88be-409f-8105-be49568258e0-kube-api-access-9cqfh\") pod \"cert-manager-758df9885c-2km6z\" (UID: \"4581d21f-88be-409f-8105-be49568258e0\") " pod="cert-manager/cert-manager-758df9885c-2km6z" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.398646 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlslb\" (UniqueName: \"kubernetes.io/projected/25fd0780-accd-456f-8df8-206823b15fe7-kube-api-access-dlslb\") pod \"cert-manager-webhook-855f577f79-r4z4f\" (UID: \"25fd0780-accd-456f-8df8-206823b15fe7\") " 
pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.398674 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f9wl\" (UniqueName: \"kubernetes.io/projected/8527c5a5-b8f1-4156-bb87-ebca589bf2ab-kube-api-access-8f9wl\") pod \"cert-manager-cainjector-676dd9bd64-gk2js\" (UID: \"8527c5a5-b8f1-4156-bb87-ebca589bf2ab\") " pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.499872 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dlslb\" (UniqueName: \"kubernetes.io/projected/25fd0780-accd-456f-8df8-206823b15fe7-kube-api-access-dlslb\") pod \"cert-manager-webhook-855f577f79-r4z4f\" (UID: \"25fd0780-accd-456f-8df8-206823b15fe7\") " pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.499935 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8f9wl\" (UniqueName: \"kubernetes.io/projected/8527c5a5-b8f1-4156-bb87-ebca589bf2ab-kube-api-access-8f9wl\") pod \"cert-manager-cainjector-676dd9bd64-gk2js\" (UID: \"8527c5a5-b8f1-4156-bb87-ebca589bf2ab\") " pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.500005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9cqfh\" (UniqueName: \"kubernetes.io/projected/4581d21f-88be-409f-8105-be49568258e0-kube-api-access-9cqfh\") pod \"cert-manager-758df9885c-2km6z\" (UID: \"4581d21f-88be-409f-8105-be49568258e0\") " pod="cert-manager/cert-manager-758df9885c-2km6z" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.519878 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f9wl\" (UniqueName: \"kubernetes.io/projected/8527c5a5-b8f1-4156-bb87-ebca589bf2ab-kube-api-access-8f9wl\") pod \"cert-manager-cainjector-676dd9bd64-gk2js\" (UID: \"8527c5a5-b8f1-4156-bb87-ebca589bf2ab\") " pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.521952 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cqfh\" (UniqueName: \"kubernetes.io/projected/4581d21f-88be-409f-8105-be49568258e0-kube-api-access-9cqfh\") pod \"cert-manager-758df9885c-2km6z\" (UID: \"4581d21f-88be-409f-8105-be49568258e0\") " pod="cert-manager/cert-manager-758df9885c-2km6z" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.526974 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlslb\" (UniqueName: \"kubernetes.io/projected/25fd0780-accd-456f-8df8-206823b15fe7-kube-api-access-dlslb\") pod \"cert-manager-webhook-855f577f79-r4z4f\" (UID: \"25fd0780-accd-456f-8df8-206823b15fe7\") " pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.564111 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.573215 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-758df9885c-2km6z" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.578192 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.850342 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-855f577f79-r4z4f"] Mar 20 15:34:18 crc kubenswrapper[3552]: I0320 15:34:18.859145 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 15:34:19 crc kubenswrapper[3552]: I0320 15:34:19.006152 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-676dd9bd64-gk2js"] Mar 20 15:34:19 crc kubenswrapper[3552]: W0320 15:34:19.016800 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4581d21f_88be_409f_8105_be49568258e0.slice/crio-74848f1b6b836ee7ae585f4b747c0aadbb459c345ccf43decc92f1f7e51a2226 WatchSource:0}: Error finding container 74848f1b6b836ee7ae585f4b747c0aadbb459c345ccf43decc92f1f7e51a2226: Status 404 returned error can't find the container with id 74848f1b6b836ee7ae585f4b747c0aadbb459c345ccf43decc92f1f7e51a2226 Mar 20 15:34:19 crc kubenswrapper[3552]: I0320 15:34:19.020296 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-758df9885c-2km6z"] Mar 20 15:34:19 crc kubenswrapper[3552]: I0320 15:34:19.186425 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-758df9885c-2km6z" event={"ID":"4581d21f-88be-409f-8105-be49568258e0","Type":"ContainerStarted","Data":"74848f1b6b836ee7ae585f4b747c0aadbb459c345ccf43decc92f1f7e51a2226"} Mar 20 15:34:19 crc kubenswrapper[3552]: I0320 15:34:19.187643 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" event={"ID":"8527c5a5-b8f1-4156-bb87-ebca589bf2ab","Type":"ContainerStarted","Data":"9285a73cda3ff88d0cb2c216f552883b7dd525dafa9fc0fc7107cc5d41c3d55d"} Mar 20 15:34:19 crc kubenswrapper[3552]: I0320 15:34:19.188615 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" event={"ID":"25fd0780-accd-456f-8df8-206823b15fe7","Type":"ContainerStarted","Data":"885ef0150744cde71edfb3821acd23c67f838d46640aada73f79d1fa18a47e5c"} Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.212051 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-758df9885c-2km6z" event={"ID":"4581d21f-88be-409f-8105-be49568258e0","Type":"ContainerStarted","Data":"88a9798a0f4e5027c16803558049a1422f0a2ad1eb0faa5b5c7f93ff791ca8b2"} Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.214266 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" event={"ID":"8527c5a5-b8f1-4156-bb87-ebca589bf2ab","Type":"ContainerStarted","Data":"4c04807482242254d55eceff461444d05ff3a9603646b31361b3cf4b62942852"} Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.216126 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" event={"ID":"25fd0780-accd-456f-8df8-206823b15fe7","Type":"ContainerStarted","Data":"6c8a2fd4997925c4f408f996097c0ff341b5a35f0b7a77a7bf541ebfe475b7df"} Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.216654 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.230377 3552 pod_startup_latency_tracker.go:102] 
"Observed pod startup duration" pod="cert-manager/cert-manager-758df9885c-2km6z" podStartSLOduration=1.754964658 podStartE2EDuration="6.2303283s" podCreationTimestamp="2026-03-20 15:34:18 +0000 UTC" firstStartedPulling="2026-03-20 15:34:19.018809822 +0000 UTC m=+558.712506662" lastFinishedPulling="2026-03-20 15:34:23.494173474 +0000 UTC m=+563.187870304" observedRunningTime="2026-03-20 15:34:24.225053747 +0000 UTC m=+563.918750587" watchObservedRunningTime="2026-03-20 15:34:24.2303283 +0000 UTC m=+563.924025150" Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.245258 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-676dd9bd64-gk2js" podStartSLOduration=1.912177507 podStartE2EDuration="6.245207163s" podCreationTimestamp="2026-03-20 15:34:18 +0000 UTC" firstStartedPulling="2026-03-20 15:34:19.01467566 +0000 UTC m=+558.708372510" lastFinishedPulling="2026-03-20 15:34:23.347705326 +0000 UTC m=+563.041402166" observedRunningTime="2026-03-20 15:34:24.241877683 +0000 UTC m=+563.935574563" watchObservedRunningTime="2026-03-20 15:34:24.245207163 +0000 UTC m=+563.938904003" Mar 20 15:34:24 crc kubenswrapper[3552]: I0320 15:34:24.261302 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" podStartSLOduration=1.5610882849999999 podStartE2EDuration="6.261255108s" podCreationTimestamp="2026-03-20 15:34:18 +0000 UTC" firstStartedPulling="2026-03-20 15:34:18.85891167 +0000 UTC m=+558.552608490" lastFinishedPulling="2026-03-20 15:34:23.559078483 +0000 UTC m=+563.252775313" observedRunningTime="2026-03-20 15:34:24.257443684 +0000 UTC m=+563.951140534" watchObservedRunningTime="2026-03-20 15:34:24.261255108 +0000 UTC m=+563.954951938" Mar 20 15:34:26 crc kubenswrapper[3552]: I0320 15:34:26.962447 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-44qcg"] Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.226917 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-controller" containerID="cri-o://36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.226995 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="nbdb" containerID="cri-o://189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.227025 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.227060 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-node" containerID="cri-o://86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.227071 3552 kuberuntime_container.go:770] "Killing 
container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="northd" containerID="cri-o://3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.227119 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-acl-logging" containerID="cri-o://598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.227134 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="sbdb" containerID="cri-o://78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.283205 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" containerID="cri-o://f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" gracePeriod=30 Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.524099 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovn-acl-logging/1.log" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.524582 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovn-controller/1.log" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.525203 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.586084 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9shnr"] Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.586203 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b0a7b91d-aa71-4e71-ab24-ddac68875d36" podNamespace="openshift-ovn-kubernetes" podName="ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.586907 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-node" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.586930 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-node" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.586945 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kubecfg-setup" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.586957 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kubecfg-setup" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.586977 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="northd" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.586989 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="northd" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.587012 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587023 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.587037 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-acl-logging" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587049 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-acl-logging" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.587062 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="sbdb" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587074 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="sbdb" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.587116 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-ovn-metrics" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587128 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-ovn-metrics" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.587143 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-controller" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587152 3552 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-controller" Mar 20 15:34:27 crc kubenswrapper[3552]: E0320 15:34:27.587166 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="nbdb" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587174 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="nbdb" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587304 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="nbdb" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587322 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="northd" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587332 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-controller" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587344 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovn-acl-logging" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587364 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="ovnkube-controller" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587385 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="sbdb" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587428 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-ovn-metrics" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.587454 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerName="kube-rbac-proxy-node" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.589859 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.593832 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-jpwlq" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723748 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723799 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723846 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723876 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723880 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash" (OuterVolumeSpecName: "host-slash") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723904 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9495\" (UniqueName: \"kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.723984 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724011 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724042 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724061 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724084 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724104 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724121 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724148 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724180 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724206 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724228 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724250 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724270 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724291 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides\") pod \"3e19f9e8-9a37-4ca8-9790-c219750ab482\" (UID: \"3e19f9e8-9a37-4ca8-9790-c219750ab482\") " Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724356 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-node-log\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724382 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-log-socket\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724423 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovn-node-metrics-cert\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724446 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovnkube-script-lib\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" 
Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724468 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-systemd-units\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724486 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-etc-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724505 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-cni-bin\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724527 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-var-lib-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724547 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-run-netns\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724567 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zc7g\" (UniqueName: \"kubernetes.io/projected/b0a7b91d-aa71-4e71-ab24-ddac68875d36-kube-api-access-6zc7g\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724588 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-env-overrides\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724610 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log" (OuterVolumeSpecName: "node-log") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724659 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724670 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-run-ovn-kubernetes\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724664 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket" (OuterVolumeSpecName: "log-socket") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724693 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-slash\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724691 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724678 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724705 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724723 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724732 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724738 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724753 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724757 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724775 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724800 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-cni-netd\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.724838 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725138 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-run-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725151 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725185 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-run-ovn\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725293 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725370 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725486 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-kubelet\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725489 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725612 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovnkube-config\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725727 3552 reconciler_common.go:300] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-openvswitch\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725750 3552 reconciler_common.go:300] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-kubelet\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725771 3552 reconciler_common.go:300] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-node-log\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725792 3552 reconciler_common.go:300] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725812 3552 reconciler_common.go:300] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-env-overrides\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725833 3552 reconciler_common.go:300] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-slash\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725853 3552 reconciler_common.go:300] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-log-socket\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725880 3552 reconciler_common.go:300] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725901 3552 reconciler_common.go:300] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovnkube-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725922 3552 reconciler_common.go:300] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-bin\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725941 3552 reconciler_common.go:300] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-run-netns\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725964 3552 reconciler_common.go:300] "Volume detached for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.725984 3552 reconciler_common.go:300] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.726005 3552 reconciler_common.go:300] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.726024 3552 reconciler_common.go:300] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-host-cni-netd\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.726042 3552 reconciler_common.go:300] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-systemd-units\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.726062 3552 reconciler_common.go:300] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e19f9e8-9a37-4ca8-9790-c219750ab482-run-ovn\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.729277 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.729290 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495" (OuterVolumeSpecName: "kube-api-access-f9495") pod "3e19f9e8-9a37-4ca8-9790-c219750ab482" (UID: "3e19f9e8-9a37-4ca8-9790-c219750ab482"). InnerVolumeSpecName "kube-api-access-f9495". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827293 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-run-netns\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827353 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-var-lib-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827389 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6zc7g\" (UniqueName: \"kubernetes.io/projected/b0a7b91d-aa71-4e71-ab24-ddac68875d36-kube-api-access-6zc7g\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827444 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-env-overrides\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827480 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-run-ovn-kubernetes\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827487 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-run-netns\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827545 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-slash\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827507 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-slash\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827583 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-var-lib-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc 
kubenswrapper[3552]: I0320 15:34:27.827609 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-cni-netd\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827642 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-cni-netd\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827696 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-run-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827743 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-run-ovn\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827806 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-run-ovn\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827852 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-run-ovn-kubernetes\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827873 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-run-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827905 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.827959 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-kubelet\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828012 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovnkube-config\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828026 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828045 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-node-log\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828081 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-log-socket\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828082 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-kubelet\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828112 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovn-node-metrics-cert\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828135 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-node-log\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828145 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovnkube-script-lib\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828175 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-systemd-units\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828220 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-systemd-units\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828258 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-log-socket\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828496 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-etc-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828529 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-cni-bin\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828576 3552 reconciler_common.go:300] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e19f9e8-9a37-4ca8-9790-c219750ab482-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828596 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-f9495\" (UniqueName: \"kubernetes.io/projected/3e19f9e8-9a37-4ca8-9790-c219750ab482-kube-api-access-f9495\") on node \"crc\" DevicePath \"\"" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828619 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-env-overrides\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828659 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-etc-openvswitch\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.828631 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b0a7b91d-aa71-4e71-ab24-ddac68875d36-host-cni-bin\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.829316 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovnkube-script-lib\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.829607 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovnkube-config\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.831989 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0a7b91d-aa71-4e71-ab24-ddac68875d36-ovn-node-metrics-cert\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.855433 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zc7g\" (UniqueName: \"kubernetes.io/projected/b0a7b91d-aa71-4e71-ab24-ddac68875d36-kube-api-access-6zc7g\") pod \"ovnkube-node-9shnr\" (UID: \"b0a7b91d-aa71-4e71-ab24-ddac68875d36\") " pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: I0320 15:34:27.922725 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:27 crc kubenswrapper[3552]: W0320 15:34:27.964124 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0a7b91d_aa71_4e71_ab24_ddac68875d36.slice/crio-0d91c5dc7bbb5b43da51abd48dbef270bd75dfd14947eb2c6d4f05a51a44a4cd WatchSource:0}: Error finding container 0d91c5dc7bbb5b43da51abd48dbef270bd75dfd14947eb2c6d4f05a51a44a4cd: Status 404 returned error can't find the container with id 0d91c5dc7bbb5b43da51abd48dbef270bd75dfd14947eb2c6d4f05a51a44a4cd Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.237361 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovn-acl-logging/1.log" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238327 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-44qcg_3e19f9e8-9a37-4ca8-9790-c219750ab482/ovn-controller/1.log" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238858 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238886 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238898 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238912 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238927 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238947 
3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238947 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.238961 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" exitCode=143 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239021 3552 scope.go:117] "RemoveContainer" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239060 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e19f9e8-9a37-4ca8-9790-c219750ab482" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" exitCode=143 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239005 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239144 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239161 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239176 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239204 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239227 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239237 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239245 3552 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239258 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239269 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239279 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239287 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239294 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239302 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239310 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239317 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239325 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239333 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239347 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239359 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239370 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} 
Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239382 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239391 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239422 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239431 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239441 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239449 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239458 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239471 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-44qcg" event={"ID":"3e19f9e8-9a37-4ca8-9790-c219750ab482","Type":"ContainerDied","Data":"8b371ed36566aceb82d5304c51128cd66dd3e2fa866c5de347ea1d53f8de7f78"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239483 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239492 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239500 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239507 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239516 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239524 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} 
Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239533 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239542 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.239551 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.240558 3552 generic.go:334] "Generic (PLEG): container finished" podID="b0a7b91d-aa71-4e71-ab24-ddac68875d36" containerID="38ca5403c397aa6247d6661e016b3ba242b7e528787c7be449539bdfc9b4ed40" exitCode=0 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.240629 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerDied","Data":"38ca5403c397aa6247d6661e016b3ba242b7e528787c7be449539bdfc9b4ed40"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.240685 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"0d91c5dc7bbb5b43da51abd48dbef270bd75dfd14947eb2c6d4f05a51a44a4cd"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.243828 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/7.log" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.244876 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/6.log" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.244950 3552 generic.go:334] "Generic (PLEG): container finished" podID="475321a1-8b7e-4033-8f72-b05a8b377347" containerID="c9e1f7b73e2a30435d1c71c755106c2fc6a4afcec0814197636221fc59b957df" exitCode=2 Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.245001 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerDied","Data":"c9e1f7b73e2a30435d1c71c755106c2fc6a4afcec0814197636221fc59b957df"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.245067 3552 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f403c1698de4def7aadd43b02d3a259f8649cf04c13ee0e528df40e0215d2870"} Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.246010 3552 scope.go:117] "RemoveContainer" containerID="c9e1f7b73e2a30435d1c71c755106c2fc6a4afcec0814197636221fc59b957df" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.248553 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-q88th_openshift-multus(475321a1-8b7e-4033-8f72-b05a8b377347)\"" pod="openshift-multus/multus-q88th" podUID="475321a1-8b7e-4033-8f72-b05a8b377347" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.350447 3552 scope.go:117] 
"RemoveContainer" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.398956 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-44qcg"] Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.405097 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-44qcg"] Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.430146 3552 scope.go:117] "RemoveContainer" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.460096 3552 scope.go:117] "RemoveContainer" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.483299 3552 scope.go:117] "RemoveContainer" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.506353 3552 scope.go:117] "RemoveContainer" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.545179 3552 scope.go:117] "RemoveContainer" containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.584563 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-855f577f79-r4z4f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.624632 3552 scope.go:117] "RemoveContainer" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.658246 3552 scope.go:117] "RemoveContainer" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.692209 3552 scope.go:117] "RemoveContainer" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.694668 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": container with ID starting with f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a not found: ID does not exist" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.694767 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} err="failed to get container status \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": rpc error: code = NotFound desc = could not find container \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": container with ID starting with f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.694789 3552 scope.go:117] "RemoveContainer" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.695377 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": container with ID starting with 78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1 not found: ID does not exist" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.695452 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} err="failed to get container status \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": rpc error: code = NotFound desc = could not find container \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": container with ID starting with 78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.695468 3552 scope.go:117] "RemoveContainer" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.695906 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": container with ID starting with 189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa not found: ID does not exist" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.695961 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} err="failed to get container status \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": rpc error: code = NotFound desc = could not find container \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": container with ID starting with 189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.695976 3552 scope.go:117] "RemoveContainer" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.696598 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": container with ID starting with 3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f not found: ID does not exist" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.696633 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} err="failed to get container status \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": rpc error: code = NotFound desc = could not find container \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": container with ID starting with 3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.696645 3552 scope.go:117] "RemoveContainer" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 
15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.696994 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": container with ID starting with 9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad not found: ID does not exist" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.697033 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} err="failed to get container status \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": rpc error: code = NotFound desc = could not find container \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": container with ID starting with 9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.697047 3552 scope.go:117] "RemoveContainer" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.697383 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": container with ID starting with 86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a not found: ID does not exist" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.697432 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} err="failed to get container status \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": rpc error: code = NotFound desc = could not find container \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": container with ID starting with 86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.697443 3552 scope.go:117] "RemoveContainer" containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.698121 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": container with ID starting with 598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091 not found: ID does not exist" containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.698163 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} err="failed to get container status \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": rpc error: code = NotFound desc = could not find container \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": container with ID starting with 598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091 not found: ID does not 
exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.698192 3552 scope.go:117] "RemoveContainer" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.699061 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": container with ID starting with 36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335 not found: ID does not exist" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.699188 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} err="failed to get container status \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": rpc error: code = NotFound desc = could not find container \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": container with ID starting with 36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.699247 3552 scope.go:117] "RemoveContainer" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" Mar 20 15:34:28 crc kubenswrapper[3552]: E0320 15:34:28.700020 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": container with ID starting with 1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f not found: ID does not exist" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.700070 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} err="failed to get container status \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": rpc error: code = NotFound desc = could not find container \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": container with ID starting with 1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.700085 3552 scope.go:117] "RemoveContainer" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.700613 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} err="failed to get container status \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": rpc error: code = NotFound desc = could not find container \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": container with ID starting with f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.700669 3552 scope.go:117] "RemoveContainer" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.701098 3552 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} err="failed to get container status \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": rpc error: code = NotFound desc = could not find container \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": container with ID starting with 78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.701122 3552 scope.go:117] "RemoveContainer" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.701581 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} err="failed to get container status \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": rpc error: code = NotFound desc = could not find container \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": container with ID starting with 189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.701621 3552 scope.go:117] "RemoveContainer" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.702048 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} err="failed to get container status \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": rpc error: code = NotFound desc = could not find container \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": container with ID starting with 3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.702072 3552 scope.go:117] "RemoveContainer" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.702486 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} err="failed to get container status \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": rpc error: code = NotFound desc = could not find container \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": container with ID starting with 9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.702528 3552 scope.go:117] "RemoveContainer" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.702902 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} err="failed to get container status \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": rpc error: code = NotFound desc = could not find container \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": container with ID starting with 86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a 
not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.702933 3552 scope.go:117] "RemoveContainer" containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.703466 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} err="failed to get container status \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": rpc error: code = NotFound desc = could not find container \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": container with ID starting with 598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.703514 3552 scope.go:117] "RemoveContainer" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.704085 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} err="failed to get container status \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": rpc error: code = NotFound desc = could not find container \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": container with ID starting with 36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.704117 3552 scope.go:117] "RemoveContainer" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.704656 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} err="failed to get container status \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": rpc error: code = NotFound desc = could not find container \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": container with ID starting with 1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.704713 3552 scope.go:117] "RemoveContainer" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.705116 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} err="failed to get container status \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": rpc error: code = NotFound desc = could not find container \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": container with ID starting with f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.705142 3552 scope.go:117] "RemoveContainer" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.706721 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} err="failed to get 
container status \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": rpc error: code = NotFound desc = could not find container \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": container with ID starting with 78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.706743 3552 scope.go:117] "RemoveContainer" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.709368 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} err="failed to get container status \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": rpc error: code = NotFound desc = could not find container \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": container with ID starting with 189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.709432 3552 scope.go:117] "RemoveContainer" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.710143 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} err="failed to get container status \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": rpc error: code = NotFound desc = could not find container \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": container with ID starting with 3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.710174 3552 scope.go:117] "RemoveContainer" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.710767 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} err="failed to get container status \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": rpc error: code = NotFound desc = could not find container \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": container with ID starting with 9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.710796 3552 scope.go:117] "RemoveContainer" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.711172 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} err="failed to get container status \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": rpc error: code = NotFound desc = could not find container \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": container with ID starting with 86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.711196 3552 scope.go:117] "RemoveContainer" 
containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.711702 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} err="failed to get container status \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": rpc error: code = NotFound desc = could not find container \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": container with ID starting with 598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.711749 3552 scope.go:117] "RemoveContainer" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.712348 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} err="failed to get container status \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": rpc error: code = NotFound desc = could not find container \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": container with ID starting with 36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.712421 3552 scope.go:117] "RemoveContainer" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.712978 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} err="failed to get container status \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": rpc error: code = NotFound desc = could not find container \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": container with ID starting with 1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.713001 3552 scope.go:117] "RemoveContainer" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.713619 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} err="failed to get container status \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": rpc error: code = NotFound desc = could not find container \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": container with ID starting with f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.713678 3552 scope.go:117] "RemoveContainer" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.714043 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} err="failed to get container status \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": rpc error: code = NotFound desc = could not find 
container \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": container with ID starting with 78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.714097 3552 scope.go:117] "RemoveContainer" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.714480 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} err="failed to get container status \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": rpc error: code = NotFound desc = could not find container \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": container with ID starting with 189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.714578 3552 scope.go:117] "RemoveContainer" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.715324 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} err="failed to get container status \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": rpc error: code = NotFound desc = could not find container \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": container with ID starting with 3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.715353 3552 scope.go:117] "RemoveContainer" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.716709 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} err="failed to get container status \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": rpc error: code = NotFound desc = could not find container \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": container with ID starting with 9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.716729 3552 scope.go:117] "RemoveContainer" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.716984 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} err="failed to get container status \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": rpc error: code = NotFound desc = could not find container \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": container with ID starting with 86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.717010 3552 scope.go:117] "RemoveContainer" containerID="598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.717346 3552 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091"} err="failed to get container status \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": rpc error: code = NotFound desc = could not find container \"598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091\": container with ID starting with 598162c76924b56b82655b9cd27f2df6f8ee727f0c19158e342c108838ace091 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.717389 3552 scope.go:117] "RemoveContainer" containerID="36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.717956 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335"} err="failed to get container status \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": rpc error: code = NotFound desc = could not find container \"36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335\": container with ID starting with 36c0e3b0c214a4e5dae4ffa04c0eb47bcdd3f393ee4aa677dea7e65be64a3335 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.717992 3552 scope.go:117] "RemoveContainer" containerID="1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.718383 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f"} err="failed to get container status \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": rpc error: code = NotFound desc = could not find container \"1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f\": container with ID starting with 1921d7cb99e19bd55e71233ff33c673701fbee0a506234678a016e5866258d2f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.718450 3552 scope.go:117] "RemoveContainer" containerID="f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.718870 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a"} err="failed to get container status \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": rpc error: code = NotFound desc = could not find container \"f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a\": container with ID starting with f0e939c19fdd7d1990fc57591877d65d4609721a59b06bb87d283cc87e0f027a not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.718892 3552 scope.go:117] "RemoveContainer" containerID="78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.719965 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1"} err="failed to get container status \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": rpc error: code = NotFound desc = could not find container \"78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1\": container with ID starting with 
78fde737195a58d8eeb438a1b7b8f8991db5640e49a28485910f53f3261037f1 not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.719994 3552 scope.go:117] "RemoveContainer" containerID="189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.720281 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa"} err="failed to get container status \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": rpc error: code = NotFound desc = could not find container \"189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa\": container with ID starting with 189783f83dcc4b2003711ff49beeab6d49a8de178dc5d90f136c6b2a71be33aa not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.720304 3552 scope.go:117] "RemoveContainer" containerID="3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.721461 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f"} err="failed to get container status \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": rpc error: code = NotFound desc = could not find container \"3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f\": container with ID starting with 3c62ee4a59821c48f4025f4418db2888e4e4d4dc4d3e1c7f48f6ab9e5c35219f not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.721487 3552 scope.go:117] "RemoveContainer" containerID="9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.721907 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad"} err="failed to get container status \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": rpc error: code = NotFound desc = could not find container \"9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad\": container with ID starting with 9a583557282956ea2773ef3f9a5e00ae56d65eda3f99978fff92d970774c66ad not found: ID does not exist" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.722055 3552 scope.go:117] "RemoveContainer" containerID="86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a" Mar 20 15:34:28 crc kubenswrapper[3552]: I0320 15:34:28.722391 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a"} err="failed to get container status \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": rpc error: code = NotFound desc = could not find container \"86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a\": container with ID starting with 86feee76066a3f568ff6746cdb04e6dd53ea2c44e76a569d89859ad696fcd72a not found: ID does not exist" Mar 20 15:34:29 crc kubenswrapper[3552]: I0320 15:34:29.252823 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"df12e8d49358c25dbe6b892b95177723ef6fae85bb9c16cb102c0d59ed57a0e3"} Mar 20 15:34:29 crc kubenswrapper[3552]: I0320 
15:34:29.252856 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"14ac32e4c2762059f8362f01e5b2abf22a271b728c78a509b6142b9d36e31a19"} Mar 20 15:34:29 crc kubenswrapper[3552]: I0320 15:34:29.252865 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"71e64d1093af86c81b1260fe649cb8984a4b6959a8414b8dd78ecba593eeb3b9"} Mar 20 15:34:29 crc kubenswrapper[3552]: I0320 15:34:29.252876 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"4f41c5b31768ea0846f82ee17489d27594794fb9c8ece626a7a7537ce2e9e1f1"} Mar 20 15:34:29 crc kubenswrapper[3552]: I0320 15:34:29.440178 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e19f9e8-9a37-4ca8-9790-c219750ab482" path="/var/lib/kubelet/pods/3e19f9e8-9a37-4ca8-9790-c219750ab482/volumes" Mar 20 15:34:30 crc kubenswrapper[3552]: I0320 15:34:30.264101 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"532856379ecfca7b283ab65a2fd73a4a78e362f115630a7729314422b868cc97"} Mar 20 15:34:30 crc kubenswrapper[3552]: I0320 15:34:30.264151 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"e0541cf2dc3bc6dcfce8594948b025760c258d7f0180dd967cc5c1671f2ad65d"} Mar 20 15:34:32 crc kubenswrapper[3552]: I0320 15:34:32.279107 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"0c465141d02c623e514b64be7f6c63503a439001ed59a7e7e6f10b462033c339"} Mar 20 15:34:34 crc kubenswrapper[3552]: I0320 15:34:34.294649 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" event={"ID":"b0a7b91d-aa71-4e71-ab24-ddac68875d36","Type":"ContainerStarted","Data":"6428f1bb33eac5e1a2342588502f9383ccd32b7629f6b24fbd40c5851e418ccf"} Mar 20 15:34:34 crc kubenswrapper[3552]: I0320 15:34:34.331556 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" podStartSLOduration=7.331475617 podStartE2EDuration="7.331475617s" podCreationTimestamp="2026-03-20 15:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:34:34.329156794 +0000 UTC m=+574.022853704" watchObservedRunningTime="2026-03-20 15:34:34.331475617 +0000 UTC m=+574.025172487" Mar 20 15:34:35 crc kubenswrapper[3552]: I0320 15:34:35.301154 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:35 crc kubenswrapper[3552]: I0320 15:34:35.301208 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:35 crc kubenswrapper[3552]: I0320 15:34:35.301219 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:35 crc kubenswrapper[3552]: I0320 15:34:35.396747 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:35 crc kubenswrapper[3552]: I0320 15:34:35.397037 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:34:42 crc kubenswrapper[3552]: I0320 15:34:42.430541 3552 scope.go:117] "RemoveContainer" containerID="c9e1f7b73e2a30435d1c71c755106c2fc6a4afcec0814197636221fc59b957df" Mar 20 15:34:42 crc kubenswrapper[3552]: I0320 15:34:42.779105 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:34:42 crc kubenswrapper[3552]: I0320 15:34:42.779587 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:34:43 crc kubenswrapper[3552]: I0320 15:34:43.341170 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/7.log" Mar 20 15:34:43 crc kubenswrapper[3552]: I0320 15:34:43.341579 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/6.log" Mar 20 15:34:43 crc kubenswrapper[3552]: I0320 15:34:43.341630 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-q88th" event={"ID":"475321a1-8b7e-4033-8f72-b05a8b377347","Type":"ContainerStarted","Data":"0e0f04fa9ef885174304703959a75d218b000dbcfa040ac6fd808f6d3b581d2e"} Mar 20 15:34:57 crc kubenswrapper[3552]: I0320 15:34:57.977185 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9shnr" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.276256 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.276601 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.276626 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.276648 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.276681 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.638215 3552 scope.go:117] "RemoveContainer" containerID="f403c1698de4def7aadd43b02d3a259f8649cf04c13ee0e528df40e0215d2870" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.715871 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\": container with ID starting with e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea not found: ID does not exist" containerID="e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.715927 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea" err="rpc error: code = NotFound desc = could not find container \"e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea\": container with ID starting with e5e3d809eb606739ae2d22ac6b201e3888700dbb1201f659c78b0ba402c2b5ea not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.716445 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\": container with ID starting with f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57 not found: ID does not exist" containerID="f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.716497 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57" err="rpc error: code = NotFound desc = could not find container \"f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57\": container with ID starting with f0ef3f2b9076d1faf16efc7dbdbfe0e76f7180b7b0a5f5d2cdf916a7ddc78a57 not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.717036 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\": container with ID starting with 9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155 not found: ID does not exist" containerID="9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.717073 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155" err="rpc error: code = NotFound desc = could not find container \"9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155\": container with ID starting with 9613bf7eeaf9dcc7f6aa565ee2ba631217d3cb0e8355152ab843fcef49bcd155 not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.717695 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\": container with ID starting with 82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf not found: ID does not exist" containerID="82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.717724 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf" err="rpc error: code = NotFound desc = could not find container \"82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf\": container with ID starting with 
82672f5b470ad12fa4bb9d72b66c89a6dcd4a1b9ccc7d1257819445d9276c5cf not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.718087 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\": container with ID starting with 8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b not found: ID does not exist" containerID="8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.718111 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b" err="rpc error: code = NotFound desc = could not find container \"8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b\": container with ID starting with 8230180b1ae351edffb6b6307ef4ab32b954b5922b26e478fc1d5acccd6bc51b not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.718412 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\": container with ID starting with 0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840 not found: ID does not exist" containerID="0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.718458 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840" err="rpc error: code = NotFound desc = could not find container \"0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840\": container with ID starting with 0985210cf1ba80ebffb4748c1bd9ea42048f5a7efe526d50750deb5e9e1fb840 not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.718759 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\": container with ID starting with 1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770 not found: ID does not exist" containerID="1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.718793 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770" err="rpc error: code = NotFound desc = could not find container \"1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770\": container with ID starting with 1b566172437651ae56f28fb2fcebe7069f1f0a0d851de8ba88ae7989c42d7770 not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.719118 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\": container with ID starting with 6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7 not found: ID does not exist" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.719155 3552 kuberuntime_gc.go:360] "Error getting 
ContainerStatus for containerID" containerID="6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7" err="rpc error: code = NotFound desc = could not find container \"6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7\": container with ID starting with 6b55c2580f24348def9e700392b39293f03ef2beaece78c7cd07f6f72b1e73c7 not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.719487 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720\": container with ID starting with dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720 not found: ID does not exist" containerID="dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.719520 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720" err="rpc error: code = NotFound desc = could not find container \"dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720\": container with ID starting with dc887e356f1b0a4dbbbd6ce756fad7c2190099b6b56ab566c0f190cfb0d1c720 not found: ID does not exist" Mar 20 15:35:01 crc kubenswrapper[3552]: E0320 15:35:01.719847 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\": container with ID starting with 5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a not found: ID does not exist" containerID="5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a" Mar 20 15:35:01 crc kubenswrapper[3552]: I0320 15:35:01.719890 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a" err="rpc error: code = NotFound desc = could not find container \"5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a\": container with ID starting with 5fcc3daacb146ccc32d36a0d417223b0435bee8988754e0ff18945f800d2ef8a not found: ID does not exist" Mar 20 15:35:02 crc kubenswrapper[3552]: I0320 15:35:02.458700 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-q88th_475321a1-8b7e-4033-8f72-b05a8b377347/kube-multus/7.log" Mar 20 15:35:12 crc kubenswrapper[3552]: I0320 15:35:12.778525 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:35:12 crc kubenswrapper[3552]: I0320 15:35:12.779170 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:35:42 crc kubenswrapper[3552]: I0320 15:35:42.778092 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:35:42 crc kubenswrapper[3552]: I0320 15:35:42.778626 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:35:42 crc kubenswrapper[3552]: I0320 15:35:42.778661 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:35:42 crc kubenswrapper[3552]: I0320 15:35:42.779329 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a6c34bfc12c2223d8144efc535af88e195af690ac65b1064e138b70ee20351af"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:35:42 crc kubenswrapper[3552]: I0320 15:35:42.779512 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://a6c34bfc12c2223d8144efc535af88e195af690ac65b1064e138b70ee20351af" gracePeriod=600 Mar 20 15:35:43 crc kubenswrapper[3552]: I0320 15:35:43.716912 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="a6c34bfc12c2223d8144efc535af88e195af690ac65b1064e138b70ee20351af" exitCode=0 Mar 20 15:35:43 crc kubenswrapper[3552]: I0320 15:35:43.716984 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"a6c34bfc12c2223d8144efc535af88e195af690ac65b1064e138b70ee20351af"} Mar 20 15:35:43 crc kubenswrapper[3552]: I0320 15:35:43.717459 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"786e47a4d6307352496452c605f1f5ce00e08ea17c3f18a4216d752aee09c1e8"} Mar 20 15:35:43 crc kubenswrapper[3552]: I0320 15:35:43.717479 3552 scope.go:117] "RemoveContainer" containerID="b0e20f7a5795a7032ac82d7a882b848725b321b9b7d2af58b2d9df46f95bd747" Mar 20 15:36:01 crc kubenswrapper[3552]: I0320 15:36:01.277027 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:36:01 crc kubenswrapper[3552]: I0320 15:36:01.277920 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:36:01 crc kubenswrapper[3552]: I0320 15:36:01.277976 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:36:01 crc kubenswrapper[3552]: I0320 15:36:01.278007 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:36:01 crc kubenswrapper[3552]: I0320 15:36:01.278070 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:37:01 crc 
kubenswrapper[3552]: I0320 15:37:01.279017 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:37:01 crc kubenswrapper[3552]: I0320 15:37:01.280552 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:37:01 crc kubenswrapper[3552]: I0320 15:37:01.280656 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:37:01 crc kubenswrapper[3552]: I0320 15:37:01.280736 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:37:01 crc kubenswrapper[3552]: I0320 15:37:01.280809 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.397491 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb"] Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.398279 3552 topology_manager.go:215] "Topology Admit Handler" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" podNamespace="openshift-marketplace" podName="93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.400324 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.408272 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-4w6pc" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.419265 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb"] Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.512294 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtdfx\" (UniqueName: \"kubernetes.io/projected/23ef2ba0-4158-4d94-8e17-ebb474c6b977-kube-api-access-wtdfx\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.512368 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-util\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.512493 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-bundle\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.613988 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wtdfx\" (UniqueName: \"kubernetes.io/projected/23ef2ba0-4158-4d94-8e17-ebb474c6b977-kube-api-access-wtdfx\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.614046 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-util\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.614075 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-bundle\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.614728 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-util\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.614840 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-bundle\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.642445 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtdfx\" (UniqueName: \"kubernetes.io/projected/23ef2ba0-4158-4d94-8e17-ebb474c6b977-kube-api-access-wtdfx\") pod \"93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.724634 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:19 crc kubenswrapper[3552]: I0320 15:37:19.930388 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb"] Mar 20 15:37:20 crc kubenswrapper[3552]: I0320 15:37:20.206182 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerStarted","Data":"5b5031e9f4dd3fdc2ee88fec13d5b638616c21e053c507abf88906064595acda"} Mar 20 15:37:20 crc kubenswrapper[3552]: I0320 15:37:20.206438 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerStarted","Data":"8aeaec5ba853402f6b814352d6021a11f394c1d99ee25d85afd9efac59068d4b"} Mar 20 15:37:21 crc kubenswrapper[3552]: I0320 15:37:21.211992 3552 generic.go:334] "Generic (PLEG): container finished" podID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerID="5b5031e9f4dd3fdc2ee88fec13d5b638616c21e053c507abf88906064595acda" exitCode=0 Mar 20 15:37:21 crc kubenswrapper[3552]: I0320 15:37:21.212056 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerDied","Data":"5b5031e9f4dd3fdc2ee88fec13d5b638616c21e053c507abf88906064595acda"} Mar 20 15:37:23 crc kubenswrapper[3552]: I0320 15:37:23.222022 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerStarted","Data":"01ff66bb7175817e13a210402678d0546d704ed3616bb98a6a5e3455ce089dfa"} Mar 20 15:37:24 crc kubenswrapper[3552]: I0320 15:37:24.230512 3552 generic.go:334] "Generic (PLEG): container finished" podID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerID="01ff66bb7175817e13a210402678d0546d704ed3616bb98a6a5e3455ce089dfa" exitCode=0 Mar 20 15:37:24 crc kubenswrapper[3552]: I0320 15:37:24.230638 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerDied","Data":"01ff66bb7175817e13a210402678d0546d704ed3616bb98a6a5e3455ce089dfa"} Mar 20 15:37:25 crc kubenswrapper[3552]: I0320 15:37:25.238755 3552 generic.go:334] "Generic (PLEG): container finished" podID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerID="ef3018330fb0f3fa24e797c9101e904ac89a236bb38bb7f47e70aa08e7b56339" exitCode=0 Mar 20 15:37:25 crc kubenswrapper[3552]: I0320 15:37:25.239092 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerDied","Data":"ef3018330fb0f3fa24e797c9101e904ac89a236bb38bb7f47e70aa08e7b56339"} Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.448861 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.631458 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtdfx\" (UniqueName: \"kubernetes.io/projected/23ef2ba0-4158-4d94-8e17-ebb474c6b977-kube-api-access-wtdfx\") pod \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.632066 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-bundle\") pod \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.632247 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-util\") pod \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\" (UID: \"23ef2ba0-4158-4d94-8e17-ebb474c6b977\") " Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.634026 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-bundle" (OuterVolumeSpecName: "bundle") pod "23ef2ba0-4158-4d94-8e17-ebb474c6b977" (UID: "23ef2ba0-4158-4d94-8e17-ebb474c6b977"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.636976 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23ef2ba0-4158-4d94-8e17-ebb474c6b977-kube-api-access-wtdfx" (OuterVolumeSpecName: "kube-api-access-wtdfx") pod "23ef2ba0-4158-4d94-8e17-ebb474c6b977" (UID: "23ef2ba0-4158-4d94-8e17-ebb474c6b977"). InnerVolumeSpecName "kube-api-access-wtdfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.645880 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-util" (OuterVolumeSpecName: "util") pod "23ef2ba0-4158-4d94-8e17-ebb474c6b977" (UID: "23ef2ba0-4158-4d94-8e17-ebb474c6b977"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.733091 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wtdfx\" (UniqueName: \"kubernetes.io/projected/23ef2ba0-4158-4d94-8e17-ebb474c6b977-kube-api-access-wtdfx\") on node \"crc\" DevicePath \"\"" Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.733130 3552 reconciler_common.go:300] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:37:26 crc kubenswrapper[3552]: I0320 15:37:26.733140 3552 reconciler_common.go:300] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/23ef2ba0-4158-4d94-8e17-ebb474c6b977-util\") on node \"crc\" DevicePath \"\"" Mar 20 15:37:27 crc kubenswrapper[3552]: I0320 15:37:27.249649 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" event={"ID":"23ef2ba0-4158-4d94-8e17-ebb474c6b977","Type":"ContainerDied","Data":"8aeaec5ba853402f6b814352d6021a11f394c1d99ee25d85afd9efac59068d4b"} Mar 20 15:37:27 crc kubenswrapper[3552]: I0320 15:37:27.249693 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8aeaec5ba853402f6b814352d6021a11f394c1d99ee25d85afd9efac59068d4b" Mar 20 15:37:27 crc kubenswrapper[3552]: I0320 15:37:27.249761 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb" Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.966893 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m"] Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.967575 3552 topology_manager.go:215] "Topology Admit Handler" podUID="85ec366f-2967-4402-b803-d1bad5b8dcba" podNamespace="openshift-operators" podName="obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:36 crc kubenswrapper[3552]: E0320 15:37:36.967713 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="util" Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.967725 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="util" Mar 20 15:37:36 crc kubenswrapper[3552]: E0320 15:37:36.967739 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="extract" Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.967746 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="extract" Mar 20 15:37:36 crc kubenswrapper[3552]: E0320 15:37:36.967756 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="pull" Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.967764 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="pull" Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.967885 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="23ef2ba0-4158-4d94-8e17-ebb474c6b977" containerName="extract" Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.968235 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:36 crc kubenswrapper[3552]: W0320 15:37:36.971834 3552 reflector.go:539] object-"openshift-operators"/"obo-prometheus-operator-dockercfg-xw26c": failed to list *v1.Secret: secrets "obo-prometheus-operator-dockercfg-xw26c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Mar 20 15:37:36 crc kubenswrapper[3552]: E0320 15:37:36.971869 3552 reflector.go:147] object-"openshift-operators"/"obo-prometheus-operator-dockercfg-xw26c": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "obo-prometheus-operator-dockercfg-xw26c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Mar 20 15:37:36 crc kubenswrapper[3552]: W0320 15:37:36.973389 3552 reflector.go:539] object-"openshift-operators"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Mar 20 15:37:36 crc kubenswrapper[3552]: E0320 15:37:36.973423 3552 reflector.go:147] object-"openshift-operators"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Mar 20 15:37:36 crc kubenswrapper[3552]: W0320 15:37:36.976078 3552 reflector.go:539] object-"openshift-operators"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Mar 20 15:37:36 crc kubenswrapper[3552]: E0320 15:37:36.976125 3552 reflector.go:147] object-"openshift-operators"/"openshift-service-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Mar 20 15:37:36 crc kubenswrapper[3552]: I0320 15:37:36.984873 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.044172 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj4vm\" (UniqueName: \"kubernetes.io/projected/85ec366f-2967-4402-b803-d1bad5b8dcba-kube-api-access-lj4vm\") pod \"obo-prometheus-operator-658fcfcb8b-54w4m\" (UID: \"85ec366f-2967-4402-b803-d1bad5b8dcba\") " pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.145455 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lj4vm\" (UniqueName: \"kubernetes.io/projected/85ec366f-2967-4402-b803-d1bad5b8dcba-kube-api-access-lj4vm\") pod \"obo-prometheus-operator-658fcfcb8b-54w4m\" (UID: 
\"85ec366f-2967-4402-b803-d1bad5b8dcba\") " pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.280258 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.280390 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4f85d41f-8ca4-49e6-88a8-a1aab6f2156f" podNamespace="openshift-operators" podName="obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.281166 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.286437 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.289934 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-2xkz2" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.303242 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.303344 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7e93d32b-5615-49fa-a73f-9c81c8cd8cd0" podNamespace="openshift-operators" podName="obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.303912 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.308597 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.321575 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.449120 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e93d32b-5615-49fa-a73f-9c81c8cd8cd0-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852\" (UID: \"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.449166 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f85d41f-8ca4-49e6-88a8-a1aab6f2156f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j\" (UID: \"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.449195 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f85d41f-8ca4-49e6-88a8-a1aab6f2156f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j\" (UID: \"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.449342 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e93d32b-5615-49fa-a73f-9c81c8cd8cd0-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852\" (UID: \"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.550753 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e93d32b-5615-49fa-a73f-9c81c8cd8cd0-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852\" (UID: \"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.550820 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f85d41f-8ca4-49e6-88a8-a1aab6f2156f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j\" (UID: \"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.550846 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/4f85d41f-8ca4-49e6-88a8-a1aab6f2156f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j\" (UID: \"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.550884 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e93d32b-5615-49fa-a73f-9c81c8cd8cd0-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852\" (UID: \"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.556987 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e93d32b-5615-49fa-a73f-9c81c8cd8cd0-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852\" (UID: \"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.570668 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f85d41f-8ca4-49e6-88a8-a1aab6f2156f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j\" (UID: \"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.573810 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e93d32b-5615-49fa-a73f-9c81c8cd8cd0-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852\" (UID: \"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.576267 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f85d41f-8ca4-49e6-88a8-a1aab6f2156f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j\" (UID: \"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.594884 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.616332 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.748674 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-6dfc56bc4d-27zhf"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.748794 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2831a7cc-6c26-47e1-bd6d-3dfc81f021f9" podNamespace="openshift-operators" podName="observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.749352 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.755984 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.756557 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-v62ml" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.770015 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-6dfc56bc4d-27zhf"] Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.855052 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xz2l\" (UniqueName: \"kubernetes.io/projected/2831a7cc-6c26-47e1-bd6d-3dfc81f021f9-kube-api-access-2xz2l\") pod \"observability-operator-6dfc56bc4d-27zhf\" (UID: \"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9\") " pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.855164 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/2831a7cc-6c26-47e1-bd6d-3dfc81f021f9-observability-operator-tls\") pod \"observability-operator-6dfc56bc4d-27zhf\" (UID: \"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9\") " pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.959008 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2xz2l\" (UniqueName: \"kubernetes.io/projected/2831a7cc-6c26-47e1-bd6d-3dfc81f021f9-kube-api-access-2xz2l\") pod \"observability-operator-6dfc56bc4d-27zhf\" (UID: \"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9\") " pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.959325 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/2831a7cc-6c26-47e1-bd6d-3dfc81f021f9-observability-operator-tls\") pod \"observability-operator-6dfc56bc4d-27zhf\" (UID: \"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9\") " pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.965998 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/2831a7cc-6c26-47e1-bd6d-3dfc81f021f9-observability-operator-tls\") pod \"observability-operator-6dfc56bc4d-27zhf\" (UID: \"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9\") " pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:37 crc kubenswrapper[3552]: I0320 15:37:37.992796 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j"] Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.064333 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852"] Mar 20 15:37:38 crc kubenswrapper[3552]: W0320 15:37:38.076270 3552 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e93d32b_5615_49fa_a73f_9c81c8cd8cd0.slice/crio-4ab9edd8c85e9b9b97a4c87b4fe16c86d78d89eaa057c796a8728c263cab0b09 WatchSource:0}: Error finding container 4ab9edd8c85e9b9b97a4c87b4fe16c86d78d89eaa057c796a8728c263cab0b09: Status 404 returned error can't find the container with id 4ab9edd8c85e9b9b97a4c87b4fe16c86d78d89eaa057c796a8728c263cab0b09 Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.134291 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-7b66ccd595-8l6z8"] Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.134393 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d7e52e7d-5410-447e-8b22-9d97e1b98f74" podNamespace="openshift-operators" podName="perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.135086 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.136888 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-p4xnh" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.140439 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-service-cert" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.152433 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-7b66ccd595-8l6z8"] Mar 20 15:37:38 crc kubenswrapper[3552]: E0320 15:37:38.170503 3552 projected.go:294] Couldn't get configMap openshift-operators/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.262696 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d7e52e7d-5410-447e-8b22-9d97e1b98f74-webhook-cert\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.262751 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e52e7d-5410-447e-8b22-9d97e1b98f74-openshift-service-ca\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.262775 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq7kz\" (UniqueName: \"kubernetes.io/projected/d7e52e7d-5410-447e-8b22-9d97e1b98f74-kube-api-access-dq7kz\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.262825 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d7e52e7d-5410-447e-8b22-9d97e1b98f74-apiservice-cert\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: 
I0320 15:37:38.294664 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" event={"ID":"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f","Type":"ContainerStarted","Data":"530ff600414e75ae52b318d4ef218af7b74b8c4963d622d5ecb23a1179896382"} Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.295920 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" event={"ID":"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0","Type":"ContainerStarted","Data":"4ab9edd8c85e9b9b97a4c87b4fe16c86d78d89eaa057c796a8728c263cab0b09"} Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.364167 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d7e52e7d-5410-447e-8b22-9d97e1b98f74-webhook-cert\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.364468 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e52e7d-5410-447e-8b22-9d97e1b98f74-openshift-service-ca\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.364599 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dq7kz\" (UniqueName: \"kubernetes.io/projected/d7e52e7d-5410-447e-8b22-9d97e1b98f74-kube-api-access-dq7kz\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.364739 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d7e52e7d-5410-447e-8b22-9d97e1b98f74-apiservice-cert\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.368501 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d7e52e7d-5410-447e-8b22-9d97e1b98f74-webhook-cert\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.370755 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d7e52e7d-5410-447e-8b22-9d97e1b98f74-apiservice-cert\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.457590 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.518869 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-xw26c" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.531273 3552 
reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.535950 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e52e7d-5410-447e-8b22-9d97e1b98f74-openshift-service-ca\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: E0320 15:37:38.541023 3552 projected.go:200] Error preparing data for projected volume kube-api-access-lj4vm for pod openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m: failed to sync configmap cache: timed out waiting for the condition Mar 20 15:37:38 crc kubenswrapper[3552]: E0320 15:37:38.541365 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/85ec366f-2967-4402-b803-d1bad5b8dcba-kube-api-access-lj4vm podName:85ec366f-2967-4402-b803-d1bad5b8dcba nodeName:}" failed. No retries permitted until 2026-03-20 15:37:39.041341312 +0000 UTC m=+758.735038142 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-lj4vm" (UniqueName: "kubernetes.io/projected/85ec366f-2967-4402-b803-d1bad5b8dcba-kube-api-access-lj4vm") pod "obo-prometheus-operator-658fcfcb8b-54w4m" (UID: "85ec366f-2967-4402-b803-d1bad5b8dcba") : failed to sync configmap cache: timed out waiting for the condition Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.541624 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xz2l\" (UniqueName: \"kubernetes.io/projected/2831a7cc-6c26-47e1-bd6d-3dfc81f021f9-kube-api-access-2xz2l\") pod \"observability-operator-6dfc56bc4d-27zhf\" (UID: \"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9\") " pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.543665 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq7kz\" (UniqueName: \"kubernetes.io/projected/d7e52e7d-5410-447e-8b22-9d97e1b98f74-kube-api-access-dq7kz\") pod \"perses-operator-7b66ccd595-8l6z8\" (UID: \"d7e52e7d-5410-447e-8b22-9d97e1b98f74\") " pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.671918 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:38 crc kubenswrapper[3552]: I0320 15:37:38.749637 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.078752 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lj4vm\" (UniqueName: \"kubernetes.io/projected/85ec366f-2967-4402-b803-d1bad5b8dcba-kube-api-access-lj4vm\") pod \"obo-prometheus-operator-658fcfcb8b-54w4m\" (UID: \"85ec366f-2967-4402-b803-d1bad5b8dcba\") " pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.085229 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj4vm\" (UniqueName: \"kubernetes.io/projected/85ec366f-2967-4402-b803-d1bad5b8dcba-kube-api-access-lj4vm\") pod \"obo-prometheus-operator-658fcfcb8b-54w4m\" (UID: \"85ec366f-2967-4402-b803-d1bad5b8dcba\") " pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.114936 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-7b66ccd595-8l6z8"] Mar 20 15:37:39 crc kubenswrapper[3552]: W0320 15:37:39.121391 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7e52e7d_5410_447e_8b22_9d97e1b98f74.slice/crio-e12a05c0f67493adf983c26f743a152e6a7acb781ed528f7ab61fe9cfde7e1e9 WatchSource:0}: Error finding container e12a05c0f67493adf983c26f743a152e6a7acb781ed528f7ab61fe9cfde7e1e9: Status 404 returned error can't find the container with id e12a05c0f67493adf983c26f743a152e6a7acb781ed528f7ab61fe9cfde7e1e9 Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.294048 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-6dfc56bc4d-27zhf"] Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.301103 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" event={"ID":"d7e52e7d-5410-447e-8b22-9d97e1b98f74","Type":"ContainerStarted","Data":"e12a05c0f67493adf983c26f743a152e6a7acb781ed528f7ab61fe9cfde7e1e9"} Mar 20 15:37:39 crc kubenswrapper[3552]: W0320 15:37:39.302824 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2831a7cc_6c26_47e1_bd6d_3dfc81f021f9.slice/crio-0d2159f5b6be11ccced9626566b07e2bd2a7a67f541bc95e05a44fd9e021040f WatchSource:0}: Error finding container 0d2159f5b6be11ccced9626566b07e2bd2a7a67f541bc95e05a44fd9e021040f: Status 404 returned error can't find the container with id 0d2159f5b6be11ccced9626566b07e2bd2a7a67f541bc95e05a44fd9e021040f Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.383873 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" Mar 20 15:37:39 crc kubenswrapper[3552]: I0320 15:37:39.671890 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m"] Mar 20 15:37:40 crc kubenswrapper[3552]: I0320 15:37:40.305912 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" event={"ID":"85ec366f-2967-4402-b803-d1bad5b8dcba","Type":"ContainerStarted","Data":"ed46b4764a9f44cc525008a2d0e556247c6f29f2456e1d7f80789907a44062f5"} Mar 20 15:37:40 crc kubenswrapper[3552]: I0320 15:37:40.307114 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" event={"ID":"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9","Type":"ContainerStarted","Data":"0d2159f5b6be11ccced9626566b07e2bd2a7a67f541bc95e05a44fd9e021040f"} Mar 20 15:37:47 crc kubenswrapper[3552]: I0320 15:37:47.386623 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" event={"ID":"4f85d41f-8ca4-49e6-88a8-a1aab6f2156f","Type":"ContainerStarted","Data":"cbf19447e501b29e160fc75d7c46b0defffbbe37bbb324915312e74418f8981f"} Mar 20 15:37:47 crc kubenswrapper[3552]: I0320 15:37:47.389923 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" event={"ID":"7e93d32b-5615-49fa-a73f-9c81c8cd8cd0","Type":"ContainerStarted","Data":"0a083d2718a09677594ce5e358fdd4648e2c9b662f21b6203efc102dd84d5340"} Mar 20 15:37:47 crc kubenswrapper[3552]: I0320 15:37:47.404269 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j" podStartSLOduration=2.3342625679999998 podStartE2EDuration="10.404197187s" podCreationTimestamp="2026-03-20 15:37:37 +0000 UTC" firstStartedPulling="2026-03-20 15:37:38.022966891 +0000 UTC m=+757.716663721" lastFinishedPulling="2026-03-20 15:37:46.09290151 +0000 UTC m=+765.786598340" observedRunningTime="2026-03-20 15:37:47.400079612 +0000 UTC m=+767.093776442" watchObservedRunningTime="2026-03-20 15:37:47.404197187 +0000 UTC m=+767.097894017" Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.428223 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" event={"ID":"2831a7cc-6c26-47e1-bd6d-3dfc81f021f9","Type":"ContainerStarted","Data":"52b0e2e61d66cecc7f7b6e950ad4d3fae10b67ca665541a3a503c5d5a45f1497"} Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.428751 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.435250 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" event={"ID":"d7e52e7d-5410-447e-8b22-9d97e1b98f74","Type":"ContainerStarted","Data":"1adcfcb3aee5a6076961d53b930bdc3579694fa28fc7d6d64d725116a0f037e8"} Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.435307 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.435336 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.454963 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852" podStartSLOduration=8.431080453 podStartE2EDuration="16.454897556s" podCreationTimestamp="2026-03-20 15:37:37 +0000 UTC" firstStartedPulling="2026-03-20 15:37:38.08089705 +0000 UTC m=+757.774593880" lastFinishedPulling="2026-03-20 15:37:46.104714153 +0000 UTC m=+765.798410983" observedRunningTime="2026-03-20 15:37:47.439007492 +0000 UTC m=+767.132704322" watchObservedRunningTime="2026-03-20 15:37:53.454897556 +0000 UTC m=+773.148594386" Mar 20 15:37:53 crc kubenswrapper[3552]: I0320 15:37:53.455132 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operators/observability-operator-6dfc56bc4d-27zhf" podStartSLOduration=3.382156097 podStartE2EDuration="16.455109911s" podCreationTimestamp="2026-03-20 15:37:37 +0000 UTC" firstStartedPulling="2026-03-20 15:37:39.306645159 +0000 UTC m=+759.000341989" lastFinishedPulling="2026-03-20 15:37:52.379598973 +0000 UTC m=+772.073295803" observedRunningTime="2026-03-20 15:37:53.452798002 +0000 UTC m=+773.146494842" watchObservedRunningTime="2026-03-20 15:37:53.455109911 +0000 UTC m=+773.148806741" Mar 20 15:37:54 crc kubenswrapper[3552]: I0320 15:37:54.440163 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" event={"ID":"85ec366f-2967-4402-b803-d1bad5b8dcba","Type":"ContainerStarted","Data":"a50bcd8bb88d7ae684ad2bc1e1f5a2f2bcbbbc87f9e8d4531c0f87e57de9e014"} Mar 20 15:37:54 crc kubenswrapper[3552]: I0320 15:37:54.465109 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" podStartSLOduration=3.25585792 podStartE2EDuration="16.465057215s" podCreationTimestamp="2026-03-20 15:37:38 +0000 UTC" firstStartedPulling="2026-03-20 15:37:39.123547644 +0000 UTC m=+758.817244474" lastFinishedPulling="2026-03-20 15:37:52.332746939 +0000 UTC m=+772.026443769" observedRunningTime="2026-03-20 15:37:53.508532734 +0000 UTC m=+773.202229574" watchObservedRunningTime="2026-03-20 15:37:54.465057215 +0000 UTC m=+774.158754045" Mar 20 15:37:54 crc kubenswrapper[3552]: I0320 15:37:54.470253 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-658fcfcb8b-54w4m" podStartSLOduration=5.835048033 podStartE2EDuration="18.470174936s" podCreationTimestamp="2026-03-20 15:37:36 +0000 UTC" firstStartedPulling="2026-03-20 15:37:39.697657057 +0000 UTC m=+759.391353887" lastFinishedPulling="2026-03-20 15:37:52.33278396 +0000 UTC m=+772.026480790" observedRunningTime="2026-03-20 15:37:54.466345008 +0000 UTC m=+774.160041848" watchObservedRunningTime="2026-03-20 15:37:54.470174936 +0000 UTC m=+774.163871766" Mar 20 15:37:58 crc kubenswrapper[3552]: I0320 15:37:58.752167 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-7b66ccd595-8l6z8" Mar 20 15:38:01 crc kubenswrapper[3552]: I0320 15:38:01.281752 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:38:01 crc kubenswrapper[3552]: I0320 15:38:01.282132 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:38:01 crc 
Mar 20 15:38:01 crc kubenswrapper[3552]: I0320 15:38:01.282216 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 15:38:01 crc kubenswrapper[3552]: I0320 15:38:01.282243 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 15:38:12 crc kubenswrapper[3552]: I0320 15:38:12.779165 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 15:38:12 crc kubenswrapper[3552]: I0320 15:38:12.779830 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.717296 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-594ll"]
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.717418 3552 topology_manager.go:215] "Topology Admit Handler" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" podNamespace="openshift-marketplace" podName="redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.723632 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.738007 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-594ll"]
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.776964 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"]
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.777108 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" podNamespace="openshift-marketplace" podName="9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.778953 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.781824 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-4w6pc"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.787872 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"]
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.836678 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-catalog-content\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.836986 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qmlt\" (UniqueName: \"kubernetes.io/projected/66a0ac4f-c041-46f8-8f27-1f0431ee7913-kube-api-access-8qmlt\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.837066 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-utilities\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.938706 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfhxp\" (UniqueName: \"kubernetes.io/projected/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-kube-api-access-vfhxp\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.938763 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-catalog-content\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.938893 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-util\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.939007 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8qmlt\" (UniqueName: \"kubernetes.io/projected/66a0ac4f-c041-46f8-8f27-1f0431ee7913-kube-api-access-8qmlt\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.939060 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-utilities\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.939095 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-bundle\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.939227 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-catalog-content\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.939484 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-utilities\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:14 crc kubenswrapper[3552]: I0320 15:38:14.958079 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qmlt\" (UniqueName: \"kubernetes.io/projected/66a0ac4f-c041-46f8-8f27-1f0431ee7913-kube-api-access-8qmlt\") pod \"redhat-operators-594ll\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.040392 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-bundle\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.040528 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vfhxp\" (UniqueName: \"kubernetes.io/projected/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-kube-api-access-vfhxp\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.040568 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-util\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.041003 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-util\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.041280 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-bundle\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.056752 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfhxp\" (UniqueName: \"kubernetes.io/projected/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-kube-api-access-vfhxp\") pod \"9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.062173 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.098841 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.563938 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-594ll"]
Mar 20 15:38:15 crc kubenswrapper[3552]: W0320 15:38:15.567190 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6e8a3cc_14b9_4494_bb05_d4a3431b22fd.slice/crio-9f1b3671d395a44aede33f195c313cf5836fd3e7bae245deaf13a2838b06365f WatchSource:0}: Error finding container 9f1b3671d395a44aede33f195c313cf5836fd3e7bae245deaf13a2838b06365f: Status 404 returned error can't find the container with id 9f1b3671d395a44aede33f195c313cf5836fd3e7bae245deaf13a2838b06365f
Mar 20 15:38:15 crc kubenswrapper[3552]: I0320 15:38:15.577889 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"]
Mar 20 15:38:16 crc kubenswrapper[3552]: I0320 15:38:16.543871 3552 generic.go:334] "Generic (PLEG): container finished" podID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerID="efddf269b3a0046afcf77b4a3158b0862f11d196655d57f76b2fb0b3bf4ba8d1" exitCode=0
Mar 20 15:38:16 crc kubenswrapper[3552]: I0320 15:38:16.543993 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerDied","Data":"efddf269b3a0046afcf77b4a3158b0862f11d196655d57f76b2fb0b3bf4ba8d1"}
Mar 20 15:38:16 crc kubenswrapper[3552]: I0320 15:38:16.544217 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerStarted","Data":"9f1b3671d395a44aede33f195c313cf5836fd3e7bae245deaf13a2838b06365f"}
Mar 20 15:38:16 crc kubenswrapper[3552]: I0320 15:38:16.552067 3552 generic.go:334] "Generic (PLEG): container finished" podID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerID="ae18dd3ea564bb1338ea5dd07a91aba6b63116755a14c128093051e5805bc008" exitCode=0
Mar 20 15:38:16 crc kubenswrapper[3552]: I0320 15:38:16.552121 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerDied","Data":"ae18dd3ea564bb1338ea5dd07a91aba6b63116755a14c128093051e5805bc008"}
Mar 20 15:38:16 crc kubenswrapper[3552]: I0320 15:38:16.552143 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerStarted","Data":"f261c162dd38775924b0049bb1df210e857ee73b2122b359245edd8de88bc7c2"}
Mar 20 15:38:17 crc kubenswrapper[3552]: I0320 15:38:17.558905 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerStarted","Data":"0f34ef81021c957e240b0602fa4fed280a33ba53b8228bd0c4c31b9c771f6210"}
Mar 20 15:38:19 crc kubenswrapper[3552]: I0320 15:38:19.570817 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerStarted","Data":"0233b2df33cd42cfe4911122e1149d3ffe7ec7fade0e35ba49daf20e181be6aa"}
Mar 20 15:38:27 crc kubenswrapper[3552]: I0320 15:38:27.610164 3552 generic.go:334] "Generic (PLEG): container finished" podID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerID="0f34ef81021c957e240b0602fa4fed280a33ba53b8228bd0c4c31b9c771f6210" exitCode=0
Mar 20 15:38:27 crc kubenswrapper[3552]: I0320 15:38:27.610240 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerDied","Data":"0f34ef81021c957e240b0602fa4fed280a33ba53b8228bd0c4c31b9c771f6210"}
Mar 20 15:38:28 crc kubenswrapper[3552]: I0320 15:38:28.620894 3552 generic.go:334] "Generic (PLEG): container finished" podID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerID="0233b2df33cd42cfe4911122e1149d3ffe7ec7fade0e35ba49daf20e181be6aa" exitCode=0
Mar 20 15:38:28 crc kubenswrapper[3552]: I0320 15:38:28.620952 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerDied","Data":"0233b2df33cd42cfe4911122e1149d3ffe7ec7fade0e35ba49daf20e181be6aa"}
Mar 20 15:38:29 crc kubenswrapper[3552]: I0320 15:38:29.633295 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerStarted","Data":"6a71e06c8810f381023f59124cfee6f7f4f1ae8e61af9a1befe041cd2f74c4c7"}
Mar 20 15:38:29 crc kubenswrapper[3552]: I0320 15:38:29.635225 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerStarted","Data":"dbcae161809487eed4b0999ae3e5b12d7d9c50759c6f3e3637d9d6f49e945e9a"}
Mar 20 15:38:30 crc kubenswrapper[3552]: I0320 15:38:30.660566 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" podStartSLOduration=14.449821081 podStartE2EDuration="16.66048816s" podCreationTimestamp="2026-03-20 15:38:14 +0000 UTC" firstStartedPulling="2026-03-20 15:38:16.545181549 +0000 UTC m=+796.238878379" lastFinishedPulling="2026-03-20 15:38:18.755848628 +0000 UTC m=+798.449545458" observedRunningTime="2026-03-20 15:38:30.654909287 +0000 UTC m=+810.348606117" watchObservedRunningTime="2026-03-20 15:38:30.66048816 +0000 UTC m=+810.354185010"
Mar 20 15:38:30 crc kubenswrapper[3552]: I0320 15:38:30.680108 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-594ll" podStartSLOduration=5.251596578 podStartE2EDuration="16.680055623s" podCreationTimestamp="2026-03-20 15:38:14 +0000 UTC" firstStartedPulling="2026-03-20 15:38:16.553858082 +0000 UTC m=+796.247554912" lastFinishedPulling="2026-03-20 15:38:27.982317137 +0000 UTC m=+807.676013957" observedRunningTime="2026-03-20 15:38:30.676532592 +0000 UTC m=+810.370229442" watchObservedRunningTime="2026-03-20 15:38:30.680055623 +0000 UTC m=+810.373752453"
Mar 20 15:38:35 crc kubenswrapper[3552]: I0320 15:38:35.063511 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:35 crc kubenswrapper[3552]: I0320 15:38:35.064045 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-594ll"
Mar 20 15:38:36 crc kubenswrapper[3552]: I0320 15:38:36.233889 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-594ll" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="registry-server" probeResult="failure" output=<
Mar 20 15:38:36 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s
Mar 20 15:38:36 crc kubenswrapper[3552]: >
Mar 20 15:38:36 crc kubenswrapper[3552]: I0320 15:38:36.669553 3552 generic.go:334] "Generic (PLEG): container finished" podID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerID="dbcae161809487eed4b0999ae3e5b12d7d9c50759c6f3e3637d9d6f49e945e9a" exitCode=0
Mar 20 15:38:36 crc kubenswrapper[3552]: I0320 15:38:36.669633 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerDied","Data":"dbcae161809487eed4b0999ae3e5b12d7d9c50759c6f3e3637d9d6f49e945e9a"}
Mar 20 15:38:37 crc kubenswrapper[3552]: I0320 15:38:37.925213 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb"
Need to start a new one" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.019030 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-util\") pod \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.019444 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-bundle\") pod \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.019510 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfhxp\" (UniqueName: \"kubernetes.io/projected/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-kube-api-access-vfhxp\") pod \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\" (UID: \"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd\") " Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.019903 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-bundle" (OuterVolumeSpecName: "bundle") pod "a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" (UID: "a6e8a3cc-14b9-4494-bb05-d4a3431b22fd"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.024883 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-kube-api-access-vfhxp" (OuterVolumeSpecName: "kube-api-access-vfhxp") pod "a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" (UID: "a6e8a3cc-14b9-4494-bb05-d4a3431b22fd"). InnerVolumeSpecName "kube-api-access-vfhxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.032036 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-util" (OuterVolumeSpecName: "util") pod "a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" (UID: "a6e8a3cc-14b9-4494-bb05-d4a3431b22fd"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.121020 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-vfhxp\" (UniqueName: \"kubernetes.io/projected/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-kube-api-access-vfhxp\") on node \"crc\" DevicePath \"\"" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.121071 3552 reconciler_common.go:300] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-util\") on node \"crc\" DevicePath \"\"" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.121086 3552 reconciler_common.go:300] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6e8a3cc-14b9-4494-bb05-d4a3431b22fd-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.680394 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" event={"ID":"a6e8a3cc-14b9-4494-bb05-d4a3431b22fd","Type":"ContainerDied","Data":"9f1b3671d395a44aede33f195c313cf5836fd3e7bae245deaf13a2838b06365f"} Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.680484 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f1b3671d395a44aede33f195c313cf5836fd3e7bae245deaf13a2838b06365f" Mar 20 15:38:38 crc kubenswrapper[3552]: I0320 15:38:38.680447 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.677101 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb"] Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.677463 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9ecdf50d-ae92-4621-9c2e-f19b7ed40399" podNamespace="openshift-nmstate" podName="nmstate-operator-5bbb58f86c-vm8rb" Mar 20 15:38:40 crc kubenswrapper[3552]: E0320 15:38:40.677600 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="extract" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.677611 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="extract" Mar 20 15:38:40 crc kubenswrapper[3552]: E0320 15:38:40.677625 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="pull" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.677632 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="pull" Mar 20 15:38:40 crc kubenswrapper[3552]: E0320 15:38:40.677641 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="util" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.677648 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="util" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.677757 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6e8a3cc-14b9-4494-bb05-d4a3431b22fd" containerName="extract" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.678149 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.680904 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.680953 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.680978 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-6d8z7" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.688725 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb"] Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.749776 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdg5k\" (UniqueName: \"kubernetes.io/projected/9ecdf50d-ae92-4621-9c2e-f19b7ed40399-kube-api-access-kdg5k\") pod \"nmstate-operator-5bbb58f86c-vm8rb\" (UID: \"9ecdf50d-ae92-4621-9c2e-f19b7ed40399\") " pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.851071 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kdg5k\" (UniqueName: \"kubernetes.io/projected/9ecdf50d-ae92-4621-9c2e-f19b7ed40399-kube-api-access-kdg5k\") pod \"nmstate-operator-5bbb58f86c-vm8rb\" (UID: \"9ecdf50d-ae92-4621-9c2e-f19b7ed40399\") " pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.870328 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdg5k\" (UniqueName: \"kubernetes.io/projected/9ecdf50d-ae92-4621-9c2e-f19b7ed40399-kube-api-access-kdg5k\") pod \"nmstate-operator-5bbb58f86c-vm8rb\" (UID: \"9ecdf50d-ae92-4621-9c2e-f19b7ed40399\") " pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" Mar 20 15:38:40 crc kubenswrapper[3552]: I0320 15:38:40.993139 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" Mar 20 15:38:41 crc kubenswrapper[3552]: I0320 15:38:41.296093 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb"] Mar 20 15:38:41 crc kubenswrapper[3552]: I0320 15:38:41.703583 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" event={"ID":"9ecdf50d-ae92-4621-9c2e-f19b7ed40399","Type":"ContainerStarted","Data":"2dec36e226958614cf569976bb828b7c2e1328865e59c62e41b6159e066e05a9"} Mar 20 15:38:42 crc kubenswrapper[3552]: I0320 15:38:42.778395 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:38:42 crc kubenswrapper[3552]: I0320 15:38:42.778491 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:38:45 crc kubenswrapper[3552]: I0320 15:38:45.158004 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-594ll" Mar 20 15:38:45 crc kubenswrapper[3552]: I0320 15:38:45.796677 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-594ll" Mar 20 15:38:45 crc kubenswrapper[3552]: I0320 15:38:45.884448 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-594ll"] Mar 20 15:38:46 crc kubenswrapper[3552]: I0320 15:38:46.725187 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-594ll" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="registry-server" containerID="cri-o://6a71e06c8810f381023f59124cfee6f7f4f1ae8e61af9a1befe041cd2f74c4c7" gracePeriod=2 Mar 20 15:38:47 crc kubenswrapper[3552]: I0320 15:38:47.734499 3552 generic.go:334] "Generic (PLEG): container finished" podID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerID="6a71e06c8810f381023f59124cfee6f7f4f1ae8e61af9a1befe041cd2f74c4c7" exitCode=0 Mar 20 15:38:47 crc kubenswrapper[3552]: I0320 15:38:47.734557 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerDied","Data":"6a71e06c8810f381023f59124cfee6f7f4f1ae8e61af9a1befe041cd2f74c4c7"} Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.559485 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-594ll" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.647568 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-catalog-content\") pod \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.647948 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-utilities\") pod \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.648004 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qmlt\" (UniqueName: \"kubernetes.io/projected/66a0ac4f-c041-46f8-8f27-1f0431ee7913-kube-api-access-8qmlt\") pod \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\" (UID: \"66a0ac4f-c041-46f8-8f27-1f0431ee7913\") " Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.648976 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-utilities" (OuterVolumeSpecName: "utilities") pod "66a0ac4f-c041-46f8-8f27-1f0431ee7913" (UID: "66a0ac4f-c041-46f8-8f27-1f0431ee7913"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.658607 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a0ac4f-c041-46f8-8f27-1f0431ee7913-kube-api-access-8qmlt" (OuterVolumeSpecName: "kube-api-access-8qmlt") pod "66a0ac4f-c041-46f8-8f27-1f0431ee7913" (UID: "66a0ac4f-c041-46f8-8f27-1f0431ee7913"). InnerVolumeSpecName "kube-api-access-8qmlt". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.740455 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-594ll" event={"ID":"66a0ac4f-c041-46f8-8f27-1f0431ee7913","Type":"ContainerDied","Data":"f261c162dd38775924b0049bb1df210e857ee73b2122b359245edd8de88bc7c2"} Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.740484 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-594ll" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.741286 3552 scope.go:117] "RemoveContainer" containerID="6a71e06c8810f381023f59124cfee6f7f4f1ae8e61af9a1befe041cd2f74c4c7" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.742128 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" event={"ID":"9ecdf50d-ae92-4621-9c2e-f19b7ed40399","Type":"ContainerStarted","Data":"837b8b5e106e4893dc7a8cac7c11b73a1b45ea79524cad73c85f490e21081108"} Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.755711 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.755755 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-8qmlt\" (UniqueName: \"kubernetes.io/projected/66a0ac4f-c041-46f8-8f27-1f0431ee7913-kube-api-access-8qmlt\") on node \"crc\" DevicePath \"\"" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.779609 3552 scope.go:117] "RemoveContainer" containerID="0f34ef81021c957e240b0602fa4fed280a33ba53b8228bd0c4c31b9c771f6210" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.782314 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5bbb58f86c-vm8rb" podStartSLOduration=1.655045856 podStartE2EDuration="8.782258496s" podCreationTimestamp="2026-03-20 15:38:40 +0000 UTC" firstStartedPulling="2026-03-20 15:38:41.308490379 +0000 UTC m=+821.002187209" lastFinishedPulling="2026-03-20 15:38:48.435703019 +0000 UTC m=+828.129399849" observedRunningTime="2026-03-20 15:38:48.779705128 +0000 UTC m=+828.473401958" watchObservedRunningTime="2026-03-20 15:38:48.782258496 +0000 UTC m=+828.475955326" Mar 20 15:38:48 crc kubenswrapper[3552]: I0320 15:38:48.852015 3552 scope.go:117] "RemoveContainer" containerID="ae18dd3ea564bb1338ea5dd07a91aba6b63116755a14c128093051e5805bc008" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.559552 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-thhvp"] Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.559854 3552 topology_manager.go:215] "Topology Admit Handler" podUID="97f8069b-8a3d-4ad5-83ba-cfb21db47084" podNamespace="openshift-nmstate" podName="nmstate-handler-thhvp" Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.559994 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="registry-server" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.560007 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="registry-server" Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.560025 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="extract-content" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.560032 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="extract-content" Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.560046 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="extract-utilities" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.560147 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" containerName="registry-server"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.560533 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.580082 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66a0ac4f-c041-46f8-8f27-1f0431ee7913" (UID: "66a0ac4f-c041-46f8-8f27-1f0431ee7913"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.584938 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.585137 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3bc33104-28eb-4add-b717-d172823f68e5" podNamespace="openshift-nmstate" podName="nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.585858 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.588704 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.606156 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.668553 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2657g\" (UniqueName: \"kubernetes.io/projected/97f8069b-8a3d-4ad5-83ba-cfb21db47084-kube-api-access-2657g\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.668599 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-nmstate-lock\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.668647 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-dbus-socket\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.668681 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-ovs-socket\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.668710 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3bc33104-28eb-4add-b717-d172823f68e5-tls-key-pair\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.668730 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnfzf\" (UniqueName: \"kubernetes.io/projected/3bc33104-28eb-4add-b717-d172823f68e5-kube-api-access-rnfzf\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.669017 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a0ac4f-c041-46f8-8f27-1f0431ee7913-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.675872 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.676016 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5c9f4509-5c04-4b38-a0ac-65d9f19d252c" podNamespace="openshift-nmstate" podName="nmstate-console-plugin-78d6dd6fc5-w48cn"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.676730 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.682759 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-cntpc"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.682781 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.683052 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.693114 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.702084 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-594ll"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.704810 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-594ll"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770776 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-dbus-socket\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770840 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-ovs-socket\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770872 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-plugin-serving-cert\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770900 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3bc33104-28eb-4add-b717-d172823f68e5-tls-key-pair\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770921 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rnfzf\" (UniqueName: \"kubernetes.io/projected/3bc33104-28eb-4add-b717-d172823f68e5-kube-api-access-rnfzf\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770948 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-nginx-conf\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.770979 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2657g\" (UniqueName: \"kubernetes.io/projected/97f8069b-8a3d-4ad5-83ba-cfb21db47084-kube-api-access-2657g\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.771001 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-nmstate-lock\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.771030 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmgrs\" (UniqueName: \"kubernetes.io/projected/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-kube-api-access-fmgrs\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.771385 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-dbus-socket\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.771757 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-ovs-socket\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.771840 3552 secret.go:194] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found
Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.771888 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3bc33104-28eb-4add-b717-d172823f68e5-tls-key-pair podName:3bc33104-28eb-4add-b717-d172823f68e5 nodeName:}" failed. No retries permitted until 2026-03-20 15:38:50.271870463 +0000 UTC m=+829.965567293 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/3bc33104-28eb-4add-b717-d172823f68e5-tls-key-pair") pod "nmstate-webhook-857c948b4f-n6m8f" (UID: "3bc33104-28eb-4add-b717-d172823f68e5") : secret "openshift-nmstate-webhook" not found
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.772170 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/97f8069b-8a3d-4ad5-83ba-cfb21db47084-nmstate-lock\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.804939 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnfzf\" (UniqueName: \"kubernetes.io/projected/3bc33104-28eb-4add-b717-d172823f68e5-kube-api-access-rnfzf\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.814642 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2657g\" (UniqueName: \"kubernetes.io/projected/97f8069b-8a3d-4ad5-83ba-cfb21db47084-kube-api-access-2657g\") pod \"nmstate-handler-thhvp\" (UID: \"97f8069b-8a3d-4ad5-83ba-cfb21db47084\") " pod="openshift-nmstate/nmstate-handler-thhvp"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.862488 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-console/console-697fc8bbf5-k8ss5"]
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.862617 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c6751a30-d225-4d39-809b-2282a051dddd" podNamespace="openshift-console" podName="console-697fc8bbf5-k8ss5"
Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.863274 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-697fc8bbf5-k8ss5"
Need to start a new one" pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.871661 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-nginx-conf\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.871712 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fmgrs\" (UniqueName: \"kubernetes.io/projected/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-kube-api-access-fmgrs\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.871778 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-plugin-serving-cert\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.871897 3552 secret.go:194] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Mar 20 15:38:49 crc kubenswrapper[3552]: E0320 15:38:49.871944 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-plugin-serving-cert podName:5c9f4509-5c04-4b38-a0ac-65d9f19d252c nodeName:}" failed. No retries permitted until 2026-03-20 15:38:50.371931193 +0000 UTC m=+830.065628023 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-plugin-serving-cert") pod "nmstate-console-plugin-78d6dd6fc5-w48cn" (UID: "5c9f4509-5c04-4b38-a0ac-65d9f19d252c") : secret "plugin-serving-cert" not found Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.873029 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-nginx-conf\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.874173 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-697fc8bbf5-k8ss5"] Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.892629 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-thhvp" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.902600 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmgrs\" (UniqueName: \"kubernetes.io/projected/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-kube-api-access-fmgrs\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:49 crc kubenswrapper[3552]: W0320 15:38:49.921102 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97f8069b_8a3d_4ad5_83ba_cfb21db47084.slice/crio-945350533eb1e604a256f77d0715c926153ecd16cb7000cc04d2dfe08889fcb3 WatchSource:0}: Error finding container 945350533eb1e604a256f77d0715c926153ecd16cb7000cc04d2dfe08889fcb3: Status 404 returned error can't find the container with id 945350533eb1e604a256f77d0715c926153ecd16cb7000cc04d2dfe08889fcb3 Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.972654 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-oauth-serving-cert\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.972733 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c6751a30-d225-4d39-809b-2282a051dddd-console-serving-cert\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.972763 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bpjc\" (UniqueName: \"kubernetes.io/projected/c6751a30-d225-4d39-809b-2282a051dddd-kube-api-access-9bpjc\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.972783 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-service-ca\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.972805 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-trusted-ca-bundle\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc kubenswrapper[3552]: I0320 15:38:49.972840 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c6751a30-d225-4d39-809b-2282a051dddd-console-oauth-config\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:49 crc 
kubenswrapper[3552]: I0320 15:38:49.972943 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-console-config\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074163 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9bpjc\" (UniqueName: \"kubernetes.io/projected/c6751a30-d225-4d39-809b-2282a051dddd-kube-api-access-9bpjc\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074216 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-service-ca\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074239 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-trusted-ca-bundle\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074276 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c6751a30-d225-4d39-809b-2282a051dddd-console-oauth-config\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074301 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-console-config\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074334 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-oauth-serving-cert\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.074680 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c6751a30-d225-4d39-809b-2282a051dddd-console-serving-cert\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.075237 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-oauth-serving-cert\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 
crc kubenswrapper[3552]: I0320 15:38:50.075356 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-service-ca\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.075426 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-console-config\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.075573 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c6751a30-d225-4d39-809b-2282a051dddd-trusted-ca-bundle\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.088510 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bpjc\" (UniqueName: \"kubernetes.io/projected/c6751a30-d225-4d39-809b-2282a051dddd-kube-api-access-9bpjc\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.090039 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c6751a30-d225-4d39-809b-2282a051dddd-console-oauth-config\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.090046 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c6751a30-d225-4d39-809b-2282a051dddd-console-serving-cert\") pod \"console-697fc8bbf5-k8ss5\" (UID: \"c6751a30-d225-4d39-809b-2282a051dddd\") " pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.186361 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.277965 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3bc33104-28eb-4add-b717-d172823f68e5-tls-key-pair\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.284145 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3bc33104-28eb-4add-b717-d172823f68e5-tls-key-pair\") pod \"nmstate-webhook-857c948b4f-n6m8f\" (UID: \"3bc33104-28eb-4add-b717-d172823f68e5\") " pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.380598 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-plugin-serving-cert\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.392441 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c9f4509-5c04-4b38-a0ac-65d9f19d252c-plugin-serving-cert\") pod \"nmstate-console-plugin-78d6dd6fc5-w48cn\" (UID: \"5c9f4509-5c04-4b38-a0ac-65d9f19d252c\") " pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.515114 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.529851 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-697fc8bbf5-k8ss5"] Mar 20 15:38:50 crc kubenswrapper[3552]: W0320 15:38:50.533589 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6751a30_d225_4d39_809b_2282a051dddd.slice/crio-1affc5640286034a0e03899f3a722053331f5c9db809b6b62d10fd1d1c0fd6e3 WatchSource:0}: Error finding container 1affc5640286034a0e03899f3a722053331f5c9db809b6b62d10fd1d1c0fd6e3: Status 404 returned error can't find the container with id 1affc5640286034a0e03899f3a722053331f5c9db809b6b62d10fd1d1c0fd6e3 Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.596977 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.732785 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f"] Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.753251 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-697fc8bbf5-k8ss5" event={"ID":"c6751a30-d225-4d39-809b-2282a051dddd","Type":"ContainerStarted","Data":"1affc5640286034a0e03899f3a722053331f5c9db809b6b62d10fd1d1c0fd6e3"} Mar 20 15:38:50 crc kubenswrapper[3552]: I0320 15:38:50.754144 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-thhvp" event={"ID":"97f8069b-8a3d-4ad5-83ba-cfb21db47084","Type":"ContainerStarted","Data":"945350533eb1e604a256f77d0715c926153ecd16cb7000cc04d2dfe08889fcb3"} Mar 20 15:38:51 crc kubenswrapper[3552]: I0320 15:38:51.105865 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn"] Mar 20 15:38:51 crc kubenswrapper[3552]: I0320 15:38:51.438173 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a0ac4f-c041-46f8-8f27-1f0431ee7913" path="/var/lib/kubelet/pods/66a0ac4f-c041-46f8-8f27-1f0431ee7913/volumes" Mar 20 15:38:51 crc kubenswrapper[3552]: I0320 15:38:51.765067 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" event={"ID":"3bc33104-28eb-4add-b717-d172823f68e5","Type":"ContainerStarted","Data":"e8df69544404465ddf9ead95f2d0bc78fa0ceb906744fdc4ecc0cf40b23e9d49"} Mar 20 15:38:51 crc kubenswrapper[3552]: I0320 15:38:51.768696 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" event={"ID":"5c9f4509-5c04-4b38-a0ac-65d9f19d252c","Type":"ContainerStarted","Data":"902325a610c0e9dc834d0d72dfc689176d94ee76a441cc080e3e0b98980fa59f"} Mar 20 15:38:51 crc kubenswrapper[3552]: I0320 15:38:51.770303 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-697fc8bbf5-k8ss5" event={"ID":"c6751a30-d225-4d39-809b-2282a051dddd","Type":"ContainerStarted","Data":"e434c0f05ab3b3e70ffa84cdf930f382d269a0d6f7081b5ed43c043861f08c74"} Mar 20 15:38:51 crc kubenswrapper[3552]: I0320 15:38:51.785956 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-console/console-697fc8bbf5-k8ss5" podStartSLOduration=2.785908362 podStartE2EDuration="2.785908362s" podCreationTimestamp="2026-03-20 15:38:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:38:51.784525445 +0000 UTC m=+831.478222285" watchObservedRunningTime="2026-03-20 15:38:51.785908362 +0000 UTC m=+831.479605192" Mar 20 15:38:54 crc kubenswrapper[3552]: I0320 15:38:54.790570 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" event={"ID":"3bc33104-28eb-4add-b717-d172823f68e5","Type":"ContainerStarted","Data":"146e3032c6787d88c87b4fb9c6a4cce8965dab91636c56864b7e9211371e2e46"} Mar 20 15:38:54 crc kubenswrapper[3552]: I0320 15:38:54.791184 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" Mar 20 15:38:54 crc kubenswrapper[3552]: I0320 15:38:54.831563 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" 
pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" podStartSLOduration=2.164833054 podStartE2EDuration="5.831497315s" podCreationTimestamp="2026-03-20 15:38:49 +0000 UTC" firstStartedPulling="2026-03-20 15:38:50.803591801 +0000 UTC m=+830.497288641" lastFinishedPulling="2026-03-20 15:38:54.470256072 +0000 UTC m=+834.163952902" observedRunningTime="2026-03-20 15:38:54.822561314 +0000 UTC m=+834.516258154" watchObservedRunningTime="2026-03-20 15:38:54.831497315 +0000 UTC m=+834.525194155" Mar 20 15:38:55 crc kubenswrapper[3552]: I0320 15:38:55.802718 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-thhvp" event={"ID":"97f8069b-8a3d-4ad5-83ba-cfb21db47084","Type":"ContainerStarted","Data":"8ac475665e9565516e864b19cfec24863d0f9cdb6e522d1b44b3cbbbd13a8dbd"} Mar 20 15:38:55 crc kubenswrapper[3552]: I0320 15:38:55.803207 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-thhvp" Mar 20 15:38:55 crc kubenswrapper[3552]: I0320 15:38:55.832836 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-thhvp" podStartSLOduration=2.307844008 podStartE2EDuration="6.832779495s" podCreationTimestamp="2026-03-20 15:38:49 +0000 UTC" firstStartedPulling="2026-03-20 15:38:49.927328372 +0000 UTC m=+829.621025202" lastFinishedPulling="2026-03-20 15:38:54.452263859 +0000 UTC m=+834.145960689" observedRunningTime="2026-03-20 15:38:55.816856827 +0000 UTC m=+835.510553667" watchObservedRunningTime="2026-03-20 15:38:55.832779495 +0000 UTC m=+835.526476325" Mar 20 15:38:56 crc kubenswrapper[3552]: I0320 15:38:56.809456 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" event={"ID":"5c9f4509-5c04-4b38-a0ac-65d9f19d252c","Type":"ContainerStarted","Data":"7393a3021cbea02a36e7c0ad45a705204ad858e03833a92bb87aac77daf094c0"} Mar 20 15:38:56 crc kubenswrapper[3552]: I0320 15:38:56.829011 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-78d6dd6fc5-w48cn" podStartSLOduration=2.970442784 podStartE2EDuration="7.828966898s" podCreationTimestamp="2026-03-20 15:38:49 +0000 UTC" firstStartedPulling="2026-03-20 15:38:51.124341055 +0000 UTC m=+830.818037885" lastFinishedPulling="2026-03-20 15:38:55.982865169 +0000 UTC m=+835.676561999" observedRunningTime="2026-03-20 15:38:56.827809377 +0000 UTC m=+836.521506217" watchObservedRunningTime="2026-03-20 15:38:56.828966898 +0000 UTC m=+836.522663728" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.867573 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gxjql"] Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.867694 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" podNamespace="openshift-marketplace" podName="community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.868905 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.882840 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gxjql"] Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.884820 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5vvk\" (UniqueName: \"kubernetes.io/projected/5588d12e-3055-40fb-a72e-cc5e745d0eba-kube-api-access-s5vvk\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.885019 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-catalog-content\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.885142 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-utilities\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.996373 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-s5vvk\" (UniqueName: \"kubernetes.io/projected/5588d12e-3055-40fb-a72e-cc5e745d0eba-kube-api-access-s5vvk\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.996689 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-catalog-content\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.996723 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-utilities\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.997112 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-utilities\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:57 crc kubenswrapper[3552]: I0320 15:38:57.997862 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-catalog-content\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:58 crc kubenswrapper[3552]: I0320 15:38:58.016850 3552 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s5vvk\" (UniqueName: \"kubernetes.io/projected/5588d12e-3055-40fb-a72e-cc5e745d0eba-kube-api-access-s5vvk\") pod \"community-operators-gxjql\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:58 crc kubenswrapper[3552]: I0320 15:38:58.194429 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:38:58 crc kubenswrapper[3552]: I0320 15:38:58.406278 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gxjql"] Mar 20 15:38:58 crc kubenswrapper[3552]: W0320 15:38:58.417568 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5588d12e_3055_40fb_a72e_cc5e745d0eba.slice/crio-74d6b125ce4955d76a784561cad2ffeab40d5094d86c4d495e8149344213c20e WatchSource:0}: Error finding container 74d6b125ce4955d76a784561cad2ffeab40d5094d86c4d495e8149344213c20e: Status 404 returned error can't find the container with id 74d6b125ce4955d76a784561cad2ffeab40d5094d86c4d495e8149344213c20e Mar 20 15:38:58 crc kubenswrapper[3552]: I0320 15:38:58.821826 3552 generic.go:334] "Generic (PLEG): container finished" podID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerID="5dfa831bc28b6274e4f6acdb4057d1e5f9abae229c20676ed4bec35991657561" exitCode=0 Mar 20 15:38:58 crc kubenswrapper[3552]: I0320 15:38:58.821882 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerDied","Data":"5dfa831bc28b6274e4f6acdb4057d1e5f9abae229c20676ed4bec35991657561"} Mar 20 15:38:58 crc kubenswrapper[3552]: I0320 15:38:58.821914 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerStarted","Data":"74d6b125ce4955d76a784561cad2ffeab40d5094d86c4d495e8149344213c20e"} Mar 20 15:38:59 crc kubenswrapper[3552]: I0320 15:38:59.848977 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerStarted","Data":"90a1c2091b9d6480fe5c2b5c23a499e561b9286029c41cc72d7ecf8fe3dbebfd"} Mar 20 15:39:00 crc kubenswrapper[3552]: I0320 15:39:00.187194 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:39:00 crc kubenswrapper[3552]: I0320 15:39:00.187261 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:39:00 crc kubenswrapper[3552]: I0320 15:39:00.192238 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:39:00 crc kubenswrapper[3552]: I0320 15:39:00.858290 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-697fc8bbf5-k8ss5" Mar 20 15:39:00 crc kubenswrapper[3552]: I0320 15:39:00.907847 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-console/console-8568c59db8-fspjn"] Mar 20 15:39:01 crc kubenswrapper[3552]: I0320 15:39:01.283411 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:39:01 crc kubenswrapper[3552]: I0320 
15:39:01.283528 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:39:01 crc kubenswrapper[3552]: I0320 15:39:01.283559 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:39:01 crc kubenswrapper[3552]: I0320 15:39:01.283587 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:39:01 crc kubenswrapper[3552]: I0320 15:39:01.283601 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:39:02 crc kubenswrapper[3552]: I0320 15:39:02.985167 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ntglf"] Mar 20 15:39:02 crc kubenswrapper[3552]: I0320 15:39:02.985316 3552 topology_manager.go:215] "Topology Admit Handler" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" podNamespace="openshift-marketplace" podName="certified-operators-ntglf" Mar 20 15:39:02 crc kubenswrapper[3552]: I0320 15:39:02.988036 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.006244 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ntglf"] Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.057106 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-catalog-content\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.057187 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-utilities\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.057266 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khzmf\" (UniqueName: \"kubernetes.io/projected/45e4df82-a0e9-47c0-a07b-c9b212d14647-kube-api-access-khzmf\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.158208 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-utilities\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.158595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-khzmf\" (UniqueName: \"kubernetes.io/projected/45e4df82-a0e9-47c0-a07b-c9b212d14647-kube-api-access-khzmf\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc 
kubenswrapper[3552]: I0320 15:39:03.158634 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-catalog-content\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.158710 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-utilities\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.158943 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-catalog-content\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.193875 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-khzmf\" (UniqueName: \"kubernetes.io/projected/45e4df82-a0e9-47c0-a07b-c9b212d14647-kube-api-access-khzmf\") pod \"certified-operators-ntglf\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.304119 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.520327 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ntglf"] Mar 20 15:39:03 crc kubenswrapper[3552]: I0320 15:39:03.868603 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerStarted","Data":"ed5c96a63b13ebb6309a0fb2ca13f54acb4b99ea2a4d752242869ada5879d583"} Mar 20 15:39:04 crc kubenswrapper[3552]: I0320 15:39:04.876097 3552 generic.go:334] "Generic (PLEG): container finished" podID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerID="91df37c5bf8b5131ad3c8b35f4a24f6031dc485619e046548bfbb579b879c9a2" exitCode=0 Mar 20 15:39:04 crc kubenswrapper[3552]: I0320 15:39:04.876206 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerDied","Data":"91df37c5bf8b5131ad3c8b35f4a24f6031dc485619e046548bfbb579b879c9a2"} Mar 20 15:39:04 crc kubenswrapper[3552]: I0320 15:39:04.879130 3552 generic.go:334] "Generic (PLEG): container finished" podID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerID="90a1c2091b9d6480fe5c2b5c23a499e561b9286029c41cc72d7ecf8fe3dbebfd" exitCode=0 Mar 20 15:39:04 crc kubenswrapper[3552]: I0320 15:39:04.879177 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerDied","Data":"90a1c2091b9d6480fe5c2b5c23a499e561b9286029c41cc72d7ecf8fe3dbebfd"} Mar 20 15:39:05 crc kubenswrapper[3552]: I0320 15:39:05.098441 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-nmstate/nmstate-handler-thhvp" Mar 20 15:39:06 crc kubenswrapper[3552]: I0320 15:39:06.894467 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerStarted","Data":"616e7c32f7b36672f772a98a9869d960d3f500d4c6bfad5f5b155eb7dd4a1977"} Mar 20 15:39:06 crc kubenswrapper[3552]: I0320 15:39:06.897463 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerStarted","Data":"1402d221ed3f76cceb8e36621cde502ad05a627a40d49b55cdcc639918bf3b50"} Mar 20 15:39:07 crc kubenswrapper[3552]: I0320 15:39:07.904111 3552 generic.go:334] "Generic (PLEG): container finished" podID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerID="616e7c32f7b36672f772a98a9869d960d3f500d4c6bfad5f5b155eb7dd4a1977" exitCode=0 Mar 20 15:39:07 crc kubenswrapper[3552]: I0320 15:39:07.904191 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerDied","Data":"616e7c32f7b36672f772a98a9869d960d3f500d4c6bfad5f5b155eb7dd4a1977"} Mar 20 15:39:07 crc kubenswrapper[3552]: I0320 15:39:07.969507 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gxjql" podStartSLOduration=4.527010595 podStartE2EDuration="10.969437123s" podCreationTimestamp="2026-03-20 15:38:57 +0000 UTC" firstStartedPulling="2026-03-20 15:38:58.823286727 +0000 UTC m=+838.516983557" lastFinishedPulling="2026-03-20 15:39:05.265713255 +0000 UTC m=+844.959410085" observedRunningTime="2026-03-20 15:39:07.963806902 +0000 UTC m=+847.657503742" watchObservedRunningTime="2026-03-20 15:39:07.969437123 +0000 UTC m=+847.663133963" Mar 20 15:39:08 crc kubenswrapper[3552]: I0320 15:39:08.194916 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:39:08 crc kubenswrapper[3552]: I0320 15:39:08.194955 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:39:09 crc kubenswrapper[3552]: I0320 15:39:09.297702 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-gxjql" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="registry-server" probeResult="failure" output=< Mar 20 15:39:09 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:39:09 crc kubenswrapper[3552]: > Mar 20 15:39:09 crc kubenswrapper[3552]: I0320 15:39:09.922339 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerStarted","Data":"c738fec59b62b194789249aa0a2f946605bb58bf3e16ae7bff697ce8e326abf8"} Mar 20 15:39:09 crc kubenswrapper[3552]: I0320 15:39:09.950105 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ntglf" podStartSLOduration=4.59776863 podStartE2EDuration="7.950035361s" podCreationTimestamp="2026-03-20 15:39:02 +0000 UTC" firstStartedPulling="2026-03-20 15:39:04.877492303 +0000 UTC m=+844.571189143" lastFinishedPulling="2026-03-20 15:39:08.229759044 +0000 UTC m=+847.923455874" observedRunningTime="2026-03-20 15:39:09.94588209 +0000 UTC 
m=+849.639578920" watchObservedRunningTime="2026-03-20 15:39:09.950035361 +0000 UTC m=+849.643732191" Mar 20 15:39:10 crc kubenswrapper[3552]: I0320 15:39:10.523254 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-857c948b4f-n6m8f" Mar 20 15:39:12 crc kubenswrapper[3552]: I0320 15:39:12.778639 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:39:12 crc kubenswrapper[3552]: I0320 15:39:12.779532 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:39:12 crc kubenswrapper[3552]: I0320 15:39:12.779654 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:39:12 crc kubenswrapper[3552]: I0320 15:39:12.780431 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"786e47a4d6307352496452c605f1f5ce00e08ea17c3f18a4216d752aee09c1e8"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:39:12 crc kubenswrapper[3552]: I0320 15:39:12.780672 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://786e47a4d6307352496452c605f1f5ce00e08ea17c3f18a4216d752aee09c1e8" gracePeriod=600 Mar 20 15:39:13 crc kubenswrapper[3552]: I0320 15:39:13.304717 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:13 crc kubenswrapper[3552]: I0320 15:39:13.305668 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:13 crc kubenswrapper[3552]: I0320 15:39:13.390122 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:15 crc kubenswrapper[3552]: I0320 15:39:15.060628 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:15 crc kubenswrapper[3552]: I0320 15:39:15.115430 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ntglf"] Mar 20 15:39:18 crc kubenswrapper[3552]: I0320 15:39:18.292829 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:39:18 crc kubenswrapper[3552]: I0320 15:39:18.399837 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:39:18 crc kubenswrapper[3552]: I0320 15:39:18.453454 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-gxjql"] Mar 20 15:39:19 crc kubenswrapper[3552]: I0320 15:39:19.700969 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="786e47a4d6307352496452c605f1f5ce00e08ea17c3f18a4216d752aee09c1e8" exitCode=0 Mar 20 15:39:19 crc kubenswrapper[3552]: I0320 15:39:19.701047 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"786e47a4d6307352496452c605f1f5ce00e08ea17c3f18a4216d752aee09c1e8"} Mar 20 15:39:19 crc kubenswrapper[3552]: I0320 15:39:19.701558 3552 scope.go:117] "RemoveContainer" containerID="a6c34bfc12c2223d8144efc535af88e195af690ac65b1064e138b70ee20351af" Mar 20 15:39:19 crc kubenswrapper[3552]: I0320 15:39:19.701686 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ntglf" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="registry-server" containerID="cri-o://c738fec59b62b194789249aa0a2f946605bb58bf3e16ae7bff697ce8e326abf8" gracePeriod=2 Mar 20 15:39:20 crc kubenswrapper[3552]: I0320 15:39:20.708202 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"902094328c1e9daadeb5f64e0a47c03d126a08ca4a0366a4ea4ca5f17a2975d1"} Mar 20 15:39:21 crc kubenswrapper[3552]: I0320 15:39:21.715321 3552 generic.go:334] "Generic (PLEG): container finished" podID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerID="c738fec59b62b194789249aa0a2f946605bb58bf3e16ae7bff697ce8e326abf8" exitCode=0 Mar 20 15:39:21 crc kubenswrapper[3552]: I0320 15:39:21.715374 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerDied","Data":"c738fec59b62b194789249aa0a2f946605bb58bf3e16ae7bff697ce8e326abf8"} Mar 20 15:39:21 crc kubenswrapper[3552]: I0320 15:39:21.715959 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gxjql" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="registry-server" containerID="cri-o://1402d221ed3f76cceb8e36621cde502ad05a627a40d49b55cdcc639918bf3b50" gracePeriod=2 Mar 20 15:39:22 crc kubenswrapper[3552]: I0320 15:39:22.723357 3552 generic.go:334] "Generic (PLEG): container finished" podID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerID="1402d221ed3f76cceb8e36621cde502ad05a627a40d49b55cdcc639918bf3b50" exitCode=0 Mar 20 15:39:22 crc kubenswrapper[3552]: I0320 15:39:22.723444 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerDied","Data":"1402d221ed3f76cceb8e36621cde502ad05a627a40d49b55cdcc639918bf3b50"} Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.047525 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.227470 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-utilities\") pod \"45e4df82-a0e9-47c0-a07b-c9b212d14647\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.227656 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-catalog-content\") pod \"45e4df82-a0e9-47c0-a07b-c9b212d14647\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.227696 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khzmf\" (UniqueName: \"kubernetes.io/projected/45e4df82-a0e9-47c0-a07b-c9b212d14647-kube-api-access-khzmf\") pod \"45e4df82-a0e9-47c0-a07b-c9b212d14647\" (UID: \"45e4df82-a0e9-47c0-a07b-c9b212d14647\") " Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.229154 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-utilities" (OuterVolumeSpecName: "utilities") pod "45e4df82-a0e9-47c0-a07b-c9b212d14647" (UID: "45e4df82-a0e9-47c0-a07b-c9b212d14647"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.240566 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45e4df82-a0e9-47c0-a07b-c9b212d14647-kube-api-access-khzmf" (OuterVolumeSpecName: "kube-api-access-khzmf") pod "45e4df82-a0e9-47c0-a07b-c9b212d14647" (UID: "45e4df82-a0e9-47c0-a07b-c9b212d14647"). InnerVolumeSpecName "kube-api-access-khzmf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.329648 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-khzmf\" (UniqueName: \"kubernetes.io/projected/45e4df82-a0e9-47c0-a07b-c9b212d14647-kube-api-access-khzmf\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.329985 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.616049 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.734095 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5vvk\" (UniqueName: \"kubernetes.io/projected/5588d12e-3055-40fb-a72e-cc5e745d0eba-kube-api-access-s5vvk\") pod \"5588d12e-3055-40fb-a72e-cc5e745d0eba\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.734159 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-utilities\") pod \"5588d12e-3055-40fb-a72e-cc5e745d0eba\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.734231 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-catalog-content\") pod \"5588d12e-3055-40fb-a72e-cc5e745d0eba\" (UID: \"5588d12e-3055-40fb-a72e-cc5e745d0eba\") " Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.735451 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-utilities" (OuterVolumeSpecName: "utilities") pod "5588d12e-3055-40fb-a72e-cc5e745d0eba" (UID: "5588d12e-3055-40fb-a72e-cc5e745d0eba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.745253 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5588d12e-3055-40fb-a72e-cc5e745d0eba-kube-api-access-s5vvk" (OuterVolumeSpecName: "kube-api-access-s5vvk") pod "5588d12e-3055-40fb-a72e-cc5e745d0eba" (UID: "5588d12e-3055-40fb-a72e-cc5e745d0eba"). InnerVolumeSpecName "kube-api-access-s5vvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.763921 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxjql" event={"ID":"5588d12e-3055-40fb-a72e-cc5e745d0eba","Type":"ContainerDied","Data":"74d6b125ce4955d76a784561cad2ffeab40d5094d86c4d495e8149344213c20e"} Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.763951 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gxjql" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.763980 3552 scope.go:117] "RemoveContainer" containerID="1402d221ed3f76cceb8e36621cde502ad05a627a40d49b55cdcc639918bf3b50" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.766941 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntglf" event={"ID":"45e4df82-a0e9-47c0-a07b-c9b212d14647","Type":"ContainerDied","Data":"ed5c96a63b13ebb6309a0fb2ca13f54acb4b99ea2a4d752242869ada5879d583"} Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.767049 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ntglf" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.830458 3552 scope.go:117] "RemoveContainer" containerID="90a1c2091b9d6480fe5c2b5c23a499e561b9286029c41cc72d7ecf8fe3dbebfd" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.835685 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.835730 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-s5vvk\" (UniqueName: \"kubernetes.io/projected/5588d12e-3055-40fb-a72e-cc5e745d0eba-kube-api-access-s5vvk\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.914308 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "45e4df82-a0e9-47c0-a07b-c9b212d14647" (UID: "45e4df82-a0e9-47c0-a07b-c9b212d14647"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.919053 3552 scope.go:117] "RemoveContainer" containerID="5dfa831bc28b6274e4f6acdb4057d1e5f9abae229c20676ed4bec35991657561" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.938578 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e4df82-a0e9-47c0-a07b-c9b212d14647-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:23 crc kubenswrapper[3552]: I0320 15:39:23.990733 3552 scope.go:117] "RemoveContainer" containerID="c738fec59b62b194789249aa0a2f946605bb58bf3e16ae7bff697ce8e326abf8" Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.046599 3552 scope.go:117] "RemoveContainer" containerID="616e7c32f7b36672f772a98a9869d960d3f500d4c6bfad5f5b155eb7dd4a1977" Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.114523 3552 scope.go:117] "RemoveContainer" containerID="91df37c5bf8b5131ad3c8b35f4a24f6031dc485619e046548bfbb579b879c9a2" Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.137545 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ntglf"] Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.146731 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ntglf"] Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.465895 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5588d12e-3055-40fb-a72e-cc5e745d0eba" (UID: "5588d12e-3055-40fb-a72e-cc5e745d0eba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.554568 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5588d12e-3055-40fb-a72e-cc5e745d0eba-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.701581 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gxjql"] Mar 20 15:39:24 crc kubenswrapper[3552]: I0320 15:39:24.706367 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gxjql"] Mar 20 15:39:25 crc kubenswrapper[3552]: I0320 15:39:25.436393 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" path="/var/lib/kubelet/pods/45e4df82-a0e9-47c0-a07b-c9b212d14647/volumes" Mar 20 15:39:25 crc kubenswrapper[3552]: I0320 15:39:25.437619 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" path="/var/lib/kubelet/pods/5588d12e-3055-40fb-a72e-cc5e745d0eba/volumes" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.026596 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-console/console-8568c59db8-fspjn" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" containerID="cri-o://c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4" gracePeriod=15 Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.449667 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-8568c59db8-fspjn_db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4/console/1.log" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.450389 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580049 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580149 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580204 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580259 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580391 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580438 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.580477 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") pod \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\" (UID: \"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4\") " Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.582478 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.582961 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca" (OuterVolumeSpecName: "service-ca") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.584811 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config" (OuterVolumeSpecName: "console-config") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.585010 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.590461 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.594756 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt" (OuterVolumeSpecName: "kube-api-access-nkbdt") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "kube-api-access-nkbdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.594800 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" (UID: "db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682009 3552 reconciler_common.go:300] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682044 3552 reconciler_common.go:300] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-oauth-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682055 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nkbdt\" (UniqueName: \"kubernetes.io/projected/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-kube-api-access-nkbdt\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682067 3552 reconciler_common.go:300] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682077 3552 reconciler_common.go:300] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-console-serving-cert\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682089 3552 reconciler_common.go:300] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-service-ca\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.682100 3552 reconciler_common.go:300] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.792393 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-console_console-8568c59db8-fspjn_db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4/console/1.log" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.792471 3552 generic.go:334] "Generic (PLEG): container finished" podID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerID="c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4" exitCode=2 Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.792499 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8568c59db8-fspjn" event={"ID":"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4","Type":"ContainerDied","Data":"c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4"} Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.792525 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8568c59db8-fspjn" event={"ID":"db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4","Type":"ContainerDied","Data":"14cc749e239ff43ea4823b0c7de8c695afe34d2ed2edfb0f26666f2e5acbcaeb"} Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.792544 3552 scope.go:117] "RemoveContainer" containerID="c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.792635 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-8568c59db8-fspjn" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.837658 3552 scope.go:117] "RemoveContainer" containerID="c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.837732 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-console/console-8568c59db8-fspjn"] Mar 20 15:39:26 crc kubenswrapper[3552]: E0320 15:39:26.838184 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4\": container with ID starting with c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4 not found: ID does not exist" containerID="c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.838229 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4"} err="failed to get container status \"c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4\": rpc error: code = NotFound desc = could not find container \"c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4\": container with ID starting with c7bbe65719b42fa3ccbc9c848c3a4b215a0039915212f709503132bfd40d1ad4 not found: ID does not exist" Mar 20 15:39:26 crc kubenswrapper[3552]: I0320 15:39:26.839475 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-8568c59db8-fspjn"] Mar 20 15:39:27 crc kubenswrapper[3552]: I0320 15:39:27.438535 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" path="/var/lib/kubelet/pods/db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4/volumes" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.915754 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7tzwm"] Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.917416 3552 topology_manager.go:215] "Topology Admit Handler" podUID="92de20d5-0e92-4206-9b86-72495402e055" podNamespace="openshift-marketplace" podName="redhat-marketplace-7tzwm" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.917701 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.917799 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.917889 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="registry-server" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.917972 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="registry-server" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.918060 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="extract-content" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.918141 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="extract-content" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.918233 
3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="registry-server" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.918311 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="registry-server" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.918421 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="extract-utilities" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.918508 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="extract-utilities" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.918609 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="extract-utilities" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.918690 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="extract-utilities" Mar 20 15:39:33 crc kubenswrapper[3552]: E0320 15:39:33.918771 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="extract-content" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.918844 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="extract-content" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.919061 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e4df82-a0e9-47c0-a07b-c9b212d14647" containerName="registry-server" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.919148 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="5588d12e-3055-40fb-a72e-cc5e745d0eba" containerName="registry-server" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.919228 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="db223d47-9acc-4b0d-b8e8-39d4d9f3e4c4" containerName="console" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.920258 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:33 crc kubenswrapper[3552]: I0320 15:39:33.923612 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7tzwm"] Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.074515 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-utilities\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.074584 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wngcm\" (UniqueName: \"kubernetes.io/projected/92de20d5-0e92-4206-9b86-72495402e055-kube-api-access-wngcm\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.074632 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-catalog-content\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.175877 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-utilities\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.175939 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wngcm\" (UniqueName: \"kubernetes.io/projected/92de20d5-0e92-4206-9b86-72495402e055-kube-api-access-wngcm\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.175972 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-catalog-content\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.176370 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-catalog-content\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.176425 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-utilities\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.196808 3552 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wngcm\" (UniqueName: \"kubernetes.io/projected/92de20d5-0e92-4206-9b86-72495402e055-kube-api-access-wngcm\") pod \"redhat-marketplace-7tzwm\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.235704 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.487534 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7tzwm"] Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.851843 3552 generic.go:334] "Generic (PLEG): container finished" podID="92de20d5-0e92-4206-9b86-72495402e055" containerID="ee334712262343c28dafdea1b1e1987b42d72dd5bde4077211774d022a787c42" exitCode=0 Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.851892 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerDied","Data":"ee334712262343c28dafdea1b1e1987b42d72dd5bde4077211774d022a787c42"} Mar 20 15:39:34 crc kubenswrapper[3552]: I0320 15:39:34.851918 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerStarted","Data":"dbb41a563ce948257aa978162d98517a8df6f488608b3fc78975a0ec8ff83ca5"} Mar 20 15:39:35 crc kubenswrapper[3552]: I0320 15:39:35.857958 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 15:39:36 crc kubenswrapper[3552]: I0320 15:39:36.861998 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerStarted","Data":"759fee85a4b8b812629438ff8f829621ebe1dbaa051a0c4d7d9cab28214511ea"} Mar 20 15:39:37 crc kubenswrapper[3552]: I0320 15:39:37.869087 3552 generic.go:334] "Generic (PLEG): container finished" podID="92de20d5-0e92-4206-9b86-72495402e055" containerID="759fee85a4b8b812629438ff8f829621ebe1dbaa051a0c4d7d9cab28214511ea" exitCode=0 Mar 20 15:39:37 crc kubenswrapper[3552]: I0320 15:39:37.869133 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerDied","Data":"759fee85a4b8b812629438ff8f829621ebe1dbaa051a0c4d7d9cab28214511ea"} Mar 20 15:39:38 crc kubenswrapper[3552]: I0320 15:39:38.876867 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerStarted","Data":"69722e40db83cef69ed513d1feb3209d732644939c7f5ff18941b71ef8d52ae7"} Mar 20 15:39:38 crc kubenswrapper[3552]: I0320 15:39:38.895196 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7tzwm" podStartSLOduration=3.616833669 podStartE2EDuration="5.89514746s" podCreationTimestamp="2026-03-20 15:39:33 +0000 UTC" firstStartedPulling="2026-03-20 15:39:35.857733918 +0000 UTC m=+875.551430748" lastFinishedPulling="2026-03-20 15:39:38.136047709 +0000 UTC m=+877.829744539" observedRunningTime="2026-03-20 15:39:38.893730642 +0000 UTC m=+878.587427472" watchObservedRunningTime="2026-03-20 15:39:38.89514746 +0000 UTC 
m=+878.588844280" Mar 20 15:39:44 crc kubenswrapper[3552]: I0320 15:39:44.236360 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:44 crc kubenswrapper[3552]: I0320 15:39:44.237071 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:44 crc kubenswrapper[3552]: I0320 15:39:44.332938 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:44 crc kubenswrapper[3552]: I0320 15:39:44.985724 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:45 crc kubenswrapper[3552]: I0320 15:39:45.024825 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7tzwm"] Mar 20 15:39:46 crc kubenswrapper[3552]: I0320 15:39:46.919511 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7tzwm" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="registry-server" containerID="cri-o://69722e40db83cef69ed513d1feb3209d732644939c7f5ff18941b71ef8d52ae7" gracePeriod=2 Mar 20 15:39:47 crc kubenswrapper[3552]: I0320 15:39:47.925272 3552 generic.go:334] "Generic (PLEG): container finished" podID="92de20d5-0e92-4206-9b86-72495402e055" containerID="69722e40db83cef69ed513d1feb3209d732644939c7f5ff18941b71ef8d52ae7" exitCode=0 Mar 20 15:39:47 crc kubenswrapper[3552]: I0320 15:39:47.925359 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerDied","Data":"69722e40db83cef69ed513d1feb3209d732644939c7f5ff18941b71ef8d52ae7"} Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.355929 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.517898 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-utilities\") pod \"92de20d5-0e92-4206-9b86-72495402e055\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.517981 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wngcm\" (UniqueName: \"kubernetes.io/projected/92de20d5-0e92-4206-9b86-72495402e055-kube-api-access-wngcm\") pod \"92de20d5-0e92-4206-9b86-72495402e055\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.518561 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-catalog-content\") pod \"92de20d5-0e92-4206-9b86-72495402e055\" (UID: \"92de20d5-0e92-4206-9b86-72495402e055\") " Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.522631 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-utilities" (OuterVolumeSpecName: "utilities") pod "92de20d5-0e92-4206-9b86-72495402e055" (UID: "92de20d5-0e92-4206-9b86-72495402e055"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.533784 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92de20d5-0e92-4206-9b86-72495402e055-kube-api-access-wngcm" (OuterVolumeSpecName: "kube-api-access-wngcm") pod "92de20d5-0e92-4206-9b86-72495402e055" (UID: "92de20d5-0e92-4206-9b86-72495402e055"). InnerVolumeSpecName "kube-api-access-wngcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.619973 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.620024 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wngcm\" (UniqueName: \"kubernetes.io/projected/92de20d5-0e92-4206-9b86-72495402e055-kube-api-access-wngcm\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.657913 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92de20d5-0e92-4206-9b86-72495402e055" (UID: "92de20d5-0e92-4206-9b86-72495402e055"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.721484 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92de20d5-0e92-4206-9b86-72495402e055-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.935357 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7tzwm" event={"ID":"92de20d5-0e92-4206-9b86-72495402e055","Type":"ContainerDied","Data":"dbb41a563ce948257aa978162d98517a8df6f488608b3fc78975a0ec8ff83ca5"} Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.935430 3552 scope.go:117] "RemoveContainer" containerID="69722e40db83cef69ed513d1feb3209d732644939c7f5ff18941b71ef8d52ae7" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.936584 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7tzwm" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.970786 3552 scope.go:117] "RemoveContainer" containerID="759fee85a4b8b812629438ff8f829621ebe1dbaa051a0c4d7d9cab28214511ea" Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.972303 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7tzwm"] Mar 20 15:39:48 crc kubenswrapper[3552]: I0320 15:39:48.977031 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7tzwm"] Mar 20 15:39:49 crc kubenswrapper[3552]: I0320 15:39:49.000769 3552 scope.go:117] "RemoveContainer" containerID="ee334712262343c28dafdea1b1e1987b42d72dd5bde4077211774d022a787c42" Mar 20 15:39:49 crc kubenswrapper[3552]: I0320 15:39:49.439020 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92de20d5-0e92-4206-9b86-72495402e055" path="/var/lib/kubelet/pods/92de20d5-0e92-4206-9b86-72495402e055/volumes" Mar 20 15:40:01 crc kubenswrapper[3552]: I0320 15:40:01.284595 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:40:01 crc kubenswrapper[3552]: I0320 15:40:01.285214 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:40:01 crc kubenswrapper[3552]: I0320 15:40:01.285245 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:40:01 crc kubenswrapper[3552]: I0320 15:40:01.285283 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:40:01 crc kubenswrapper[3552]: I0320 15:40:01.285307 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:40:01 crc kubenswrapper[3552]: E0320 15:40:01.924577 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1\": container with ID starting with 61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1 not found: ID does not exist" containerID="61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1" Mar 20 15:40:01 crc kubenswrapper[3552]: I0320 15:40:01.924646 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1" err="rpc error: code = NotFound desc = could not find container \"61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1\": container with ID starting with 61c1772d4376c754b5d81a2bae0d964bdbb400f716ff6934c6341fbf77f2eed1 not found: ID does not exist" Mar 20 15:41:01 crc kubenswrapper[3552]: I0320 15:41:01.293928 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:41:01 crc kubenswrapper[3552]: I0320 15:41:01.295172 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:41:01 crc kubenswrapper[3552]: I0320 15:41:01.295258 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:41:01 crc kubenswrapper[3552]: I0320 15:41:01.295305 3552 kubelet_getters.go:187] "Pod 
status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:41:01 crc kubenswrapper[3552]: I0320 15:41:01.295352 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.618437 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf"] Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.619143 3552 topology_manager.go:215] "Topology Admit Handler" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" podNamespace="openshift-marketplace" podName="f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: E0320 15:41:32.619327 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="extract-content" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.619344 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="extract-content" Mar 20 15:41:32 crc kubenswrapper[3552]: E0320 15:41:32.619361 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="registry-server" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.619371 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="registry-server" Mar 20 15:41:32 crc kubenswrapper[3552]: E0320 15:41:32.619390 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="extract-utilities" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.619415 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="extract-utilities" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.619551 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="92de20d5-0e92-4206-9b86-72495402e055" containerName="registry-server" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.620490 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.623670 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-4w6pc" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.624475 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf"] Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.640704 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmghk\" (UniqueName: \"kubernetes.io/projected/de1b0afb-ad33-40f3-a3ed-919158631191-kube-api-access-mmghk\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.640776 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-bundle\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.640807 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-util\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.741979 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mmghk\" (UniqueName: \"kubernetes.io/projected/de1b0afb-ad33-40f3-a3ed-919158631191-kube-api-access-mmghk\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.742087 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-bundle\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.742140 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-util\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.742732 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-util\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.743508 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-bundle\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.778847 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmghk\" (UniqueName: \"kubernetes.io/projected/de1b0afb-ad33-40f3-a3ed-919158631191-kube-api-access-mmghk\") pod \"f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:32 crc kubenswrapper[3552]: I0320 15:41:32.936003 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:33 crc kubenswrapper[3552]: I0320 15:41:33.338105 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf"] Mar 20 15:41:33 crc kubenswrapper[3552]: I0320 15:41:33.537015 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" event={"ID":"de1b0afb-ad33-40f3-a3ed-919158631191","Type":"ContainerStarted","Data":"6421c91f880d26bf7697c1bc855dd26f89d4da23117bc395bf4d3a2121fa6fb5"} Mar 20 15:41:34 crc kubenswrapper[3552]: I0320 15:41:34.543750 3552 generic.go:334] "Generic (PLEG): container finished" podID="de1b0afb-ad33-40f3-a3ed-919158631191" containerID="6918999b04c82b35da420acd7baa4c3ca60cc209b3618589e990ccb5e3850de5" exitCode=0 Mar 20 15:41:34 crc kubenswrapper[3552]: I0320 15:41:34.543857 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" event={"ID":"de1b0afb-ad33-40f3-a3ed-919158631191","Type":"ContainerDied","Data":"6918999b04c82b35da420acd7baa4c3ca60cc209b3618589e990ccb5e3850de5"} Mar 20 15:41:38 crc kubenswrapper[3552]: I0320 15:41:38.563734 3552 generic.go:334] "Generic (PLEG): container finished" podID="de1b0afb-ad33-40f3-a3ed-919158631191" containerID="a7abca2251f4f6ab184ec91f8867ba97d0bc96bb784b318784fafd586aed446e" exitCode=0 Mar 20 15:41:38 crc kubenswrapper[3552]: I0320 15:41:38.563874 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" event={"ID":"de1b0afb-ad33-40f3-a3ed-919158631191","Type":"ContainerDied","Data":"a7abca2251f4f6ab184ec91f8867ba97d0bc96bb784b318784fafd586aed446e"} Mar 20 15:41:39 crc kubenswrapper[3552]: I0320 15:41:39.570852 3552 generic.go:334] "Generic (PLEG): container finished" podID="de1b0afb-ad33-40f3-a3ed-919158631191" containerID="770e19e838fdbb2378c3db208f650e181ad9175419d5f1aa21582fee09f28b76" exitCode=0 Mar 20 15:41:39 crc kubenswrapper[3552]: I0320 
15:41:39.570914 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" event={"ID":"de1b0afb-ad33-40f3-a3ed-919158631191","Type":"ContainerDied","Data":"770e19e838fdbb2378c3db208f650e181ad9175419d5f1aa21582fee09f28b76"} Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.753633 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.933499 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmghk\" (UniqueName: \"kubernetes.io/projected/de1b0afb-ad33-40f3-a3ed-919158631191-kube-api-access-mmghk\") pod \"de1b0afb-ad33-40f3-a3ed-919158631191\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.933575 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-util\") pod \"de1b0afb-ad33-40f3-a3ed-919158631191\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.933628 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-bundle\") pod \"de1b0afb-ad33-40f3-a3ed-919158631191\" (UID: \"de1b0afb-ad33-40f3-a3ed-919158631191\") " Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.934990 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-bundle" (OuterVolumeSpecName: "bundle") pod "de1b0afb-ad33-40f3-a3ed-919158631191" (UID: "de1b0afb-ad33-40f3-a3ed-919158631191"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.942952 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de1b0afb-ad33-40f3-a3ed-919158631191-kube-api-access-mmghk" (OuterVolumeSpecName: "kube-api-access-mmghk") pod "de1b0afb-ad33-40f3-a3ed-919158631191" (UID: "de1b0afb-ad33-40f3-a3ed-919158631191"). InnerVolumeSpecName "kube-api-access-mmghk". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:41:40 crc kubenswrapper[3552]: I0320 15:41:40.945934 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-util" (OuterVolumeSpecName: "util") pod "de1b0afb-ad33-40f3-a3ed-919158631191" (UID: "de1b0afb-ad33-40f3-a3ed-919158631191"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:41:41 crc kubenswrapper[3552]: I0320 15:41:41.034479 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mmghk\" (UniqueName: \"kubernetes.io/projected/de1b0afb-ad33-40f3-a3ed-919158631191-kube-api-access-mmghk\") on node \"crc\" DevicePath \"\"" Mar 20 15:41:41 crc kubenswrapper[3552]: I0320 15:41:41.034511 3552 reconciler_common.go:300] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-util\") on node \"crc\" DevicePath \"\"" Mar 20 15:41:41 crc kubenswrapper[3552]: I0320 15:41:41.034522 3552 reconciler_common.go:300] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de1b0afb-ad33-40f3-a3ed-919158631191-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:41:41 crc kubenswrapper[3552]: I0320 15:41:41.582578 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" event={"ID":"de1b0afb-ad33-40f3-a3ed-919158631191","Type":"ContainerDied","Data":"6421c91f880d26bf7697c1bc855dd26f89d4da23117bc395bf4d3a2121fa6fb5"} Mar 20 15:41:41 crc kubenswrapper[3552]: I0320 15:41:41.582921 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6421c91f880d26bf7697c1bc855dd26f89d4da23117bc395bf4d3a2121fa6fb5" Mar 20 15:41:41 crc kubenswrapper[3552]: I0320 15:41:41.582679 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf" Mar 20 15:41:42 crc kubenswrapper[3552]: I0320 15:41:42.779107 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:41:42 crc kubenswrapper[3552]: I0320 15:41:42.779192 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.373177 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk"] Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.373919 3552 topology_manager.go:215] "Topology Admit Handler" podUID="09ba95df-e4cc-4819-8244-426a6a13e8e8" podNamespace="metallb-system" podName="metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: E0320 15:41:52.374114 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="extract" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.374128 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="extract" Mar 20 15:41:52 crc kubenswrapper[3552]: E0320 15:41:52.374147 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="pull" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.374155 3552 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="pull" Mar 20 15:41:52 crc kubenswrapper[3552]: E0320 15:41:52.374171 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="util" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.374179 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="util" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.374317 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="de1b0afb-ad33-40f3-a3ed-919158631191" containerName="extract" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.374955 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.377393 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.379095 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.379311 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-qfnst" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.379816 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.383197 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.397383 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk"] Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.548023 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx"] Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.548132 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7e937e0b-c631-4b12-a33e-d98c139398d5" podNamespace="metallb-system" podName="metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.548710 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.551810 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-5gnvc" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.551892 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.552531 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.563388 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rj4c\" (UniqueName: \"kubernetes.io/projected/09ba95df-e4cc-4819-8244-426a6a13e8e8-kube-api-access-6rj4c\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.563482 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/09ba95df-e4cc-4819-8244-426a6a13e8e8-apiservice-cert\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.563548 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/09ba95df-e4cc-4819-8244-426a6a13e8e8-webhook-cert\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.574713 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx"] Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.664760 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrkf7\" (UniqueName: \"kubernetes.io/projected/7e937e0b-c631-4b12-a33e-d98c139398d5-kube-api-access-jrkf7\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.664812 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e937e0b-c631-4b12-a33e-d98c139398d5-apiservice-cert\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.664849 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/09ba95df-e4cc-4819-8244-426a6a13e8e8-apiservice-cert\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " 
pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.664929 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e937e0b-c631-4b12-a33e-d98c139398d5-webhook-cert\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.664969 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/09ba95df-e4cc-4819-8244-426a6a13e8e8-webhook-cert\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.665005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6rj4c\" (UniqueName: \"kubernetes.io/projected/09ba95df-e4cc-4819-8244-426a6a13e8e8-kube-api-access-6rj4c\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.670340 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/09ba95df-e4cc-4819-8244-426a6a13e8e8-webhook-cert\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.670390 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/09ba95df-e4cc-4819-8244-426a6a13e8e8-apiservice-cert\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.686351 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rj4c\" (UniqueName: \"kubernetes.io/projected/09ba95df-e4cc-4819-8244-426a6a13e8e8-kube-api-access-6rj4c\") pod \"metallb-operator-controller-manager-597cc54c77-tc5lk\" (UID: \"09ba95df-e4cc-4819-8244-426a6a13e8e8\") " pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.690729 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.766709 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-jrkf7\" (UniqueName: \"kubernetes.io/projected/7e937e0b-c631-4b12-a33e-d98c139398d5-kube-api-access-jrkf7\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.766775 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e937e0b-c631-4b12-a33e-d98c139398d5-apiservice-cert\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.766842 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e937e0b-c631-4b12-a33e-d98c139398d5-webhook-cert\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.771244 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e937e0b-c631-4b12-a33e-d98c139398d5-webhook-cert\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.786390 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrkf7\" (UniqueName: \"kubernetes.io/projected/7e937e0b-c631-4b12-a33e-d98c139398d5-kube-api-access-jrkf7\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.788266 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e937e0b-c631-4b12-a33e-d98c139398d5-apiservice-cert\") pod \"metallb-operator-webhook-server-8c8685c97-pdplx\" (UID: \"7e937e0b-c631-4b12-a33e-d98c139398d5\") " pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:52 crc kubenswrapper[3552]: I0320 15:41:52.864471 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:41:53 crc kubenswrapper[3552]: I0320 15:41:53.035197 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk"] Mar 20 15:41:53 crc kubenswrapper[3552]: W0320 15:41:53.076919 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09ba95df_e4cc_4819_8244_426a6a13e8e8.slice/crio-3eb5b3cc26a6c78ec441d8fcd51df1883eb8ea7cc2248acfcb6571c57269b42a WatchSource:0}: Error finding container 3eb5b3cc26a6c78ec441d8fcd51df1883eb8ea7cc2248acfcb6571c57269b42a: Status 404 returned error can't find the container with id 3eb5b3cc26a6c78ec441d8fcd51df1883eb8ea7cc2248acfcb6571c57269b42a Mar 20 15:41:53 crc kubenswrapper[3552]: I0320 15:41:53.471898 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx"] Mar 20 15:41:53 crc kubenswrapper[3552]: W0320 15:41:53.479752 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e937e0b_c631_4b12_a33e_d98c139398d5.slice/crio-c2d03880b3837dbd1a44353e49e9a9f6443336a490e6b51f8f886e43bbf9e6cb WatchSource:0}: Error finding container c2d03880b3837dbd1a44353e49e9a9f6443336a490e6b51f8f886e43bbf9e6cb: Status 404 returned error can't find the container with id c2d03880b3837dbd1a44353e49e9a9f6443336a490e6b51f8f886e43bbf9e6cb Mar 20 15:41:53 crc kubenswrapper[3552]: I0320 15:41:53.651386 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" event={"ID":"7e937e0b-c631-4b12-a33e-d98c139398d5","Type":"ContainerStarted","Data":"c2d03880b3837dbd1a44353e49e9a9f6443336a490e6b51f8f886e43bbf9e6cb"} Mar 20 15:41:53 crc kubenswrapper[3552]: I0320 15:41:53.652456 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" event={"ID":"09ba95df-e4cc-4819-8244-426a6a13e8e8","Type":"ContainerStarted","Data":"3eb5b3cc26a6c78ec441d8fcd51df1883eb8ea7cc2248acfcb6571c57269b42a"} Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.295979 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.297524 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.297581 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.297606 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.297629 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.691138 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" event={"ID":"7e937e0b-c631-4b12-a33e-d98c139398d5","Type":"ContainerStarted","Data":"ac4917b22cd5fd8c3307838a41b71f2606bf78d444d76f27db67f58f5b0c2d3d"} Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 
15:42:01.692474 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" event={"ID":"09ba95df-e4cc-4819-8244-426a6a13e8e8","Type":"ContainerStarted","Data":"b98db6da452398410d941ced13074091be97dc4c5e61a078ecf387d8703ed748"} Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.711061 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" podStartSLOduration=2.022320775 podStartE2EDuration="9.711020921s" podCreationTimestamp="2026-03-20 15:41:52 +0000 UTC" firstStartedPulling="2026-03-20 15:41:53.482263678 +0000 UTC m=+1013.175960508" lastFinishedPulling="2026-03-20 15:42:01.170963824 +0000 UTC m=+1020.864660654" observedRunningTime="2026-03-20 15:42:01.70833984 +0000 UTC m=+1021.402036670" watchObservedRunningTime="2026-03-20 15:42:01.711020921 +0000 UTC m=+1021.404717751" Mar 20 15:42:01 crc kubenswrapper[3552]: I0320 15:42:01.733695 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" podStartSLOduration=1.6413770140000001 podStartE2EDuration="9.733648167s" podCreationTimestamp="2026-03-20 15:41:52 +0000 UTC" firstStartedPulling="2026-03-20 15:41:53.105387586 +0000 UTC m=+1012.799084416" lastFinishedPulling="2026-03-20 15:42:01.197658739 +0000 UTC m=+1020.891355569" observedRunningTime="2026-03-20 15:42:01.729743142 +0000 UTC m=+1021.423439972" watchObservedRunningTime="2026-03-20 15:42:01.733648167 +0000 UTC m=+1021.427344997" Mar 20 15:42:02 crc kubenswrapper[3552]: I0320 15:42:02.691939 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:42:02 crc kubenswrapper[3552]: I0320 15:42:02.696003 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:42:12 crc kubenswrapper[3552]: I0320 15:42:12.778969 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:42:12 crc kubenswrapper[3552]: I0320 15:42:12.780625 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:42:12 crc kubenswrapper[3552]: I0320 15:42:12.885186 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-8c8685c97-pdplx" Mar 20 15:42:32 crc kubenswrapper[3552]: I0320 15:42:32.694406 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-597cc54c77-tc5lk" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.495978 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["metallb-system/controller-568fbbdf85-2dltt"] Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.496064 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5752c9c1-702c-4b5d-9c15-f978fae70a49" podNamespace="metallb-system" 
podName="controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.496942 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.499675 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-pg572"] Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.499785 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7" podNamespace="metallb-system" podName="speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.501791 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.505848 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.505899 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.506857 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.507189 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-8hdqp" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.507278 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.509674 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-cg27t" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.511687 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-568fbbdf85-2dltt"] Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.515958 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.656718 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-sockets\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.656797 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-reloader\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.656847 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-startup\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.656909 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metrics-certs\") pod \"speaker-pg572\" (UID: 
\"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657072 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657180 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzb5b\" (UniqueName: \"kubernetes.io/projected/5752c9c1-702c-4b5d-9c15-f978fae70a49-kube-api-access-hzb5b\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657221 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metrics\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657256 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-conf\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657358 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5752c9c1-702c-4b5d-9c15-f978fae70a49-metrics-certs\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657415 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fszz8\" (UniqueName: \"kubernetes.io/projected/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-kube-api-access-fszz8\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657491 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5752c9c1-702c-4b5d-9c15-f978fae70a49-cert\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.657526 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metallb-excludel2\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758387 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5752c9c1-702c-4b5d-9c15-f978fae70a49-cert\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " 
pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758483 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metallb-excludel2\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758518 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-sockets\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758552 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-reloader\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758579 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-startup\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758612 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metrics-certs\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758638 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758676 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hzb5b\" (UniqueName: \"kubernetes.io/projected/5752c9c1-702c-4b5d-9c15-f978fae70a49-kube-api-access-hzb5b\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758706 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metrics\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758731 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-conf\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758774 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5752c9c1-702c-4b5d-9c15-f978fae70a49-metrics-certs\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.758803 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fszz8\" (UniqueName: \"kubernetes.io/projected/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-kube-api-access-fszz8\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.759867 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metallb-excludel2\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.760135 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-sockets\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.760355 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-reloader\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.761108 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-startup\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: E0320 15:42:33.761669 3552 secret.go:194] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Mar 20 15:42:33 crc kubenswrapper[3552]: E0320 15:42:33.761779 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist podName:cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7 nodeName:}" failed. No retries permitted until 2026-03-20 15:42:34.261750738 +0000 UTC m=+1053.955447608 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist") pod "speaker-pg572" (UID: "cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7") : secret "metallb-memberlist" not found Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.762290 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metrics\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.762526 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-frr-conf\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.764491 3552 reflector.go:351] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.771173 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-metrics-certs\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.772705 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5752c9c1-702c-4b5d-9c15-f978fae70a49-metrics-certs\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.773780 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5752c9c1-702c-4b5d-9c15-f978fae70a49-cert\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.776496 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fszz8\" (UniqueName: \"kubernetes.io/projected/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-kube-api-access-fszz8\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.784479 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzb5b\" (UniqueName: \"kubernetes.io/projected/5752c9c1-702c-4b5d-9c15-f978fae70a49-kube-api-access-hzb5b\") pod \"controller-568fbbdf85-2dltt\" (UID: \"5752c9c1-702c-4b5d-9c15-f978fae70a49\") " pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:33 crc kubenswrapper[3552]: I0320 15:42:33.815601 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:34 crc kubenswrapper[3552]: I0320 15:42:34.219488 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-568fbbdf85-2dltt"] Mar 20 15:42:34 crc kubenswrapper[3552]: I0320 15:42:34.265676 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:34 crc kubenswrapper[3552]: E0320 15:42:34.265907 3552 secret.go:194] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Mar 20 15:42:34 crc kubenswrapper[3552]: E0320 15:42:34.265966 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist podName:cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7 nodeName:}" failed. No retries permitted until 2026-03-20 15:42:35.265946855 +0000 UTC m=+1054.959643685 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist") pod "speaker-pg572" (UID: "cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7") : secret "metallb-memberlist" not found Mar 20 15:42:34 crc kubenswrapper[3552]: I0320 15:42:34.916832 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-568fbbdf85-2dltt" event={"ID":"5752c9c1-702c-4b5d-9c15-f978fae70a49","Type":"ContainerStarted","Data":"2b6ea60b249e90b019936ac285d8cb805a9f24323592bb3e96ad582d3351dd5b"} Mar 20 15:42:34 crc kubenswrapper[3552]: I0320 15:42:34.917167 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-568fbbdf85-2dltt" event={"ID":"5752c9c1-702c-4b5d-9c15-f978fae70a49","Type":"ContainerStarted","Data":"64ded0f284907ca50fa8a632b54cc0bfc6ff6e76ad50f890f49d2cd54320295d"} Mar 20 15:42:35 crc kubenswrapper[3552]: I0320 15:42:35.278349 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:35 crc kubenswrapper[3552]: I0320 15:42:35.284373 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7-memberlist\") pod \"speaker-pg572\" (UID: \"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7\") " pod="metallb-system/speaker-pg572" Mar 20 15:42:35 crc kubenswrapper[3552]: I0320 15:42:35.335787 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-pg572" Mar 20 15:42:35 crc kubenswrapper[3552]: I0320 15:42:35.922239 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"0e8e90922921e6ff45d4e5afbb322e2d6b43a9e7ced32beca102fa3601b1e2ec"} Mar 20 15:42:39 crc kubenswrapper[3552]: I0320 15:42:39.956594 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-568fbbdf85-2dltt" event={"ID":"5752c9c1-702c-4b5d-9c15-f978fae70a49","Type":"ContainerStarted","Data":"6b4e2926a0e5196732b8abe203e2999862236aebe3e461e15293c92de0256226"} Mar 20 15:42:39 crc kubenswrapper[3552]: I0320 15:42:39.976917 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="metallb-system/controller-568fbbdf85-2dltt" podStartSLOduration=2.7794405920000003 podStartE2EDuration="6.976868905s" podCreationTimestamp="2026-03-20 15:42:33 +0000 UTC" firstStartedPulling="2026-03-20 15:42:34.561690166 +0000 UTC m=+1054.255386996" lastFinishedPulling="2026-03-20 15:42:38.759118479 +0000 UTC m=+1058.452815309" observedRunningTime="2026-03-20 15:42:39.975347474 +0000 UTC m=+1059.669044314" watchObservedRunningTime="2026-03-20 15:42:39.976868905 +0000 UTC m=+1059.670565735" Mar 20 15:42:40 crc kubenswrapper[3552]: I0320 15:42:40.962282 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:42 crc kubenswrapper[3552]: I0320 15:42:42.778538 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:42:42 crc kubenswrapper[3552]: I0320 15:42:42.778620 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:42:42 crc kubenswrapper[3552]: I0320 15:42:42.778663 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:42:42 crc kubenswrapper[3552]: I0320 15:42:42.779629 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"902094328c1e9daadeb5f64e0a47c03d126a08ca4a0366a4ea4ca5f17a2975d1"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:42:42 crc kubenswrapper[3552]: I0320 15:42:42.779856 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://902094328c1e9daadeb5f64e0a47c03d126a08ca4a0366a4ea4ca5f17a2975d1" gracePeriod=600 Mar 20 15:42:43 crc kubenswrapper[3552]: I0320 15:42:43.986038 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="902094328c1e9daadeb5f64e0a47c03d126a08ca4a0366a4ea4ca5f17a2975d1" exitCode=0 Mar 20 15:42:43 
crc kubenswrapper[3552]: I0320 15:42:43.986098 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"902094328c1e9daadeb5f64e0a47c03d126a08ca4a0366a4ea4ca5f17a2975d1"} Mar 20 15:42:43 crc kubenswrapper[3552]: I0320 15:42:43.986318 3552 scope.go:117] "RemoveContainer" containerID="786e47a4d6307352496452c605f1f5ce00e08ea17c3f18a4216d752aee09c1e8" Mar 20 15:42:45 crc kubenswrapper[3552]: I0320 15:42:45.997574 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"7f985d384e5c2566938c7d880ef875555c77af84a50df3c9a9abd4000ed8661c"} Mar 20 15:42:47 crc kubenswrapper[3552]: I0320 15:42:47.004581 3552 generic.go:334] "Generic (PLEG): container finished" podID="cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7" containerID="f491810f9e5d1c24ea173649ad3eb6870e5d9a491af0fc88921b8eb5ccb5b356" exitCode=0 Mar 20 15:42:47 crc kubenswrapper[3552]: I0320 15:42:47.004646 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerDied","Data":"f491810f9e5d1c24ea173649ad3eb6870e5d9a491af0fc88921b8eb5ccb5b356"} Mar 20 15:42:49 crc kubenswrapper[3552]: I0320 15:42:49.018774 3552 generic.go:334] "Generic (PLEG): container finished" podID="cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7" containerID="b7d41e481e7d4ecb03d094fd8ea4faeb3758e9777f46d8c5e8825ed2c80de675" exitCode=0 Mar 20 15:42:49 crc kubenswrapper[3552]: I0320 15:42:49.019061 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerDied","Data":"b7d41e481e7d4ecb03d094fd8ea4faeb3758e9777f46d8c5e8825ed2c80de675"} Mar 20 15:42:50 crc kubenswrapper[3552]: I0320 15:42:50.024690 3552 generic.go:334] "Generic (PLEG): container finished" podID="cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7" containerID="748ec38de8ee3518fa0aabe91b9273a634c51233dd2226461cfd0e5fde0ad64d" exitCode=0 Mar 20 15:42:50 crc kubenswrapper[3552]: I0320 15:42:50.024861 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerDied","Data":"748ec38de8ee3518fa0aabe91b9273a634c51233dd2226461cfd0e5fde0ad64d"} Mar 20 15:42:51 crc kubenswrapper[3552]: I0320 15:42:51.037033 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"1e0c4c7447c7ee1c3d3b85ae2af40f5808490e971722979b42178cc385e13319"} Mar 20 15:42:51 crc kubenswrapper[3552]: I0320 15:42:51.037373 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"54895926be0e5e09018dae3d522ad460a19f4a41088c69bed141fd05ba802122"} Mar 20 15:42:51 crc kubenswrapper[3552]: I0320 15:42:51.037424 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"2e68873334e536f236b2d11f671d75f85efed319609097e68abf14df7f847d48"} Mar 20 15:42:51 crc kubenswrapper[3552]: I0320 15:42:51.037439 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"d5aa6b2c28292b9acc57b7f2cff5e97b8a9d0e8044442f6acd2d3063d80ea214"} Mar 20 15:42:52 crc kubenswrapper[3552]: I0320 15:42:52.045896 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"e7294599fdacd002ab1d464e9b9689e8ebc0a0b629af49a4bc63de4f402eec66"} Mar 20 15:42:52 crc kubenswrapper[3552]: I0320 15:42:52.045932 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-pg572" event={"ID":"cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7","Type":"ContainerStarted","Data":"eddeac4877b9f03702c4dfd4e94d10970e42168d9f8ac63a8e8673b282e1bda0"} Mar 20 15:42:52 crc kubenswrapper[3552]: I0320 15:42:52.075421 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="metallb-system/speaker-pg572" podStartSLOduration=8.261582671 podStartE2EDuration="19.075349493s" podCreationTimestamp="2026-03-20 15:42:33 +0000 UTC" firstStartedPulling="2026-03-20 15:42:35.747435646 +0000 UTC m=+1055.441132476" lastFinishedPulling="2026-03-20 15:42:46.561202468 +0000 UTC m=+1066.254899298" observedRunningTime="2026-03-20 15:42:52.067785801 +0000 UTC m=+1071.761482641" watchObservedRunningTime="2026-03-20 15:42:52.075349493 +0000 UTC m=+1071.769046333" Mar 20 15:42:53 crc kubenswrapper[3552]: I0320 15:42:53.067530 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-pg572" Mar 20 15:42:53 crc kubenswrapper[3552]: I0320 15:42:53.819943 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-568fbbdf85-2dltt" Mar 20 15:42:55 crc kubenswrapper[3552]: I0320 15:42:55.336188 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/speaker-pg572" Mar 20 15:42:55 crc kubenswrapper[3552]: I0320 15:42:55.385045 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/speaker-pg572" Mar 20 15:43:01 crc kubenswrapper[3552]: I0320 15:43:01.298052 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:43:01 crc kubenswrapper[3552]: I0320 15:43:01.298734 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:43:01 crc kubenswrapper[3552]: I0320 15:43:01.298771 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:43:01 crc kubenswrapper[3552]: I0320 15:43:01.298805 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:43:01 crc kubenswrapper[3552]: I0320 15:43:01.298859 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:43:05 crc kubenswrapper[3552]: I0320 15:43:05.341344 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-pg572" Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.808846 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8jt8t"] Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.809304 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9a2da8d3-efae-407e-bd00-74dc74646eb5" 
podNamespace="openstack-operators" podName="openstack-operator-index-8jt8t" Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.809947 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.813425 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.813656 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.822770 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8jt8t"] Mar 20 15:43:07 crc kubenswrapper[3552]: I0320 15:43:07.905383 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2thf2\" (UniqueName: \"kubernetes.io/projected/9a2da8d3-efae-407e-bd00-74dc74646eb5-kube-api-access-2thf2\") pod \"openstack-operator-index-8jt8t\" (UID: \"9a2da8d3-efae-407e-bd00-74dc74646eb5\") " pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.006715 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2thf2\" (UniqueName: \"kubernetes.io/projected/9a2da8d3-efae-407e-bd00-74dc74646eb5-kube-api-access-2thf2\") pod \"openstack-operator-index-8jt8t\" (UID: \"9a2da8d3-efae-407e-bd00-74dc74646eb5\") " pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.042541 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2thf2\" (UniqueName: \"kubernetes.io/projected/9a2da8d3-efae-407e-bd00-74dc74646eb5-kube-api-access-2thf2\") pod \"openstack-operator-index-8jt8t\" (UID: \"9a2da8d3-efae-407e-bd00-74dc74646eb5\") " pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.177228 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.577738 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8jt8t"] Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.793105 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8jt8t"] Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.993704 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-c64kg"] Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.994053 3552 topology_manager.go:215] "Topology Admit Handler" podUID="13bbb361-b8be-4148-b9a0-e1fe341db57b" podNamespace="openstack-operators" podName="openstack-operator-index-c64kg" Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.994695 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:08 crc kubenswrapper[3552]: I0320 15:43:08.997247 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-l777n" Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.006025 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-c64kg"] Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.121137 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l5rc\" (UniqueName: \"kubernetes.io/projected/13bbb361-b8be-4148-b9a0-e1fe341db57b-kube-api-access-4l5rc\") pod \"openstack-operator-index-c64kg\" (UID: \"13bbb361-b8be-4148-b9a0-e1fe341db57b\") " pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.164025 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8jt8t" event={"ID":"9a2da8d3-efae-407e-bd00-74dc74646eb5","Type":"ContainerStarted","Data":"d970e73f1874f3f91b26e718544e6ce1a09bfda75a265904164525f49c05e50b"} Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.222185 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4l5rc\" (UniqueName: \"kubernetes.io/projected/13bbb361-b8be-4148-b9a0-e1fe341db57b-kube-api-access-4l5rc\") pod \"openstack-operator-index-c64kg\" (UID: \"13bbb361-b8be-4148-b9a0-e1fe341db57b\") " pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.241983 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l5rc\" (UniqueName: \"kubernetes.io/projected/13bbb361-b8be-4148-b9a0-e1fe341db57b-kube-api-access-4l5rc\") pod \"openstack-operator-index-c64kg\" (UID: \"13bbb361-b8be-4148-b9a0-e1fe341db57b\") " pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.347666 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:09 crc kubenswrapper[3552]: I0320 15:43:09.944450 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-c64kg"] Mar 20 15:43:11 crc kubenswrapper[3552]: I0320 15:43:11.174898 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-c64kg" event={"ID":"13bbb361-b8be-4148-b9a0-e1fe341db57b","Type":"ContainerStarted","Data":"b465fbe1ffcc4c4e2d8f1df92e0d22fdbf239e628116a618ce76cafac928c706"} Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.183575 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8jt8t" event={"ID":"9a2da8d3-efae-407e-bd00-74dc74646eb5","Type":"ContainerStarted","Data":"48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955"} Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.183670 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-8jt8t" podUID="9a2da8d3-efae-407e-bd00-74dc74646eb5" containerName="registry-server" containerID="cri-o://48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955" gracePeriod=2 Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.185020 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-c64kg" event={"ID":"13bbb361-b8be-4148-b9a0-e1fe341db57b","Type":"ContainerStarted","Data":"2af8f6104d1f095e631b00853cec5752fbff29cd9784d72a3e8fa4f41d016304"} Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.201312 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8jt8t" podStartSLOduration=1.997302562 podStartE2EDuration="5.20126814s" podCreationTimestamp="2026-03-20 15:43:07 +0000 UTC" firstStartedPulling="2026-03-20 15:43:08.59575803 +0000 UTC m=+1088.289454870" lastFinishedPulling="2026-03-20 15:43:11.799723618 +0000 UTC m=+1091.493420448" observedRunningTime="2026-03-20 15:43:12.200661154 +0000 UTC m=+1091.894358054" watchObservedRunningTime="2026-03-20 15:43:12.20126814 +0000 UTC m=+1091.894964970" Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.218607 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-c64kg" podStartSLOduration=2.792571116 podStartE2EDuration="4.218565703s" podCreationTimestamp="2026-03-20 15:43:08 +0000 UTC" firstStartedPulling="2026-03-20 15:43:10.372969801 +0000 UTC m=+1090.066666631" lastFinishedPulling="2026-03-20 15:43:11.798964388 +0000 UTC m=+1091.492661218" observedRunningTime="2026-03-20 15:43:12.216317583 +0000 UTC m=+1091.910014453" watchObservedRunningTime="2026-03-20 15:43:12.218565703 +0000 UTC m=+1091.912262523" Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.649956 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.773042 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2thf2\" (UniqueName: \"kubernetes.io/projected/9a2da8d3-efae-407e-bd00-74dc74646eb5-kube-api-access-2thf2\") pod \"9a2da8d3-efae-407e-bd00-74dc74646eb5\" (UID: \"9a2da8d3-efae-407e-bd00-74dc74646eb5\") " Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.778364 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a2da8d3-efae-407e-bd00-74dc74646eb5-kube-api-access-2thf2" (OuterVolumeSpecName: "kube-api-access-2thf2") pod "9a2da8d3-efae-407e-bd00-74dc74646eb5" (UID: "9a2da8d3-efae-407e-bd00-74dc74646eb5"). InnerVolumeSpecName "kube-api-access-2thf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:43:12 crc kubenswrapper[3552]: I0320 15:43:12.874877 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2thf2\" (UniqueName: \"kubernetes.io/projected/9a2da8d3-efae-407e-bd00-74dc74646eb5-kube-api-access-2thf2\") on node \"crc\" DevicePath \"\"" Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.194187 3552 generic.go:334] "Generic (PLEG): container finished" podID="9a2da8d3-efae-407e-bd00-74dc74646eb5" containerID="48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955" exitCode=0 Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.194230 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8jt8t" Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.194285 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8jt8t" event={"ID":"9a2da8d3-efae-407e-bd00-74dc74646eb5","Type":"ContainerDied","Data":"48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955"} Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.195807 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8jt8t" event={"ID":"9a2da8d3-efae-407e-bd00-74dc74646eb5","Type":"ContainerDied","Data":"d970e73f1874f3f91b26e718544e6ce1a09bfda75a265904164525f49c05e50b"} Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.195842 3552 scope.go:117] "RemoveContainer" containerID="48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955" Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.245647 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8jt8t"] Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.251170 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-8jt8t"] Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.254059 3552 scope.go:117] "RemoveContainer" containerID="48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955" Mar 20 15:43:13 crc kubenswrapper[3552]: E0320 15:43:13.254520 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955\": container with ID starting with 48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955 not found: ID does not exist" containerID="48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955" Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.254571 
3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955"} err="failed to get container status \"48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955\": rpc error: code = NotFound desc = could not find container \"48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955\": container with ID starting with 48c0f33eb781f30f46c907bf34566dbeb097cbdbc355ba1a575c8bd76b53f955 not found: ID does not exist" Mar 20 15:43:13 crc kubenswrapper[3552]: I0320 15:43:13.448911 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a2da8d3-efae-407e-bd00-74dc74646eb5" path="/var/lib/kubelet/pods/9a2da8d3-efae-407e-bd00-74dc74646eb5/volumes" Mar 20 15:43:19 crc kubenswrapper[3552]: I0320 15:43:19.347893 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:19 crc kubenswrapper[3552]: I0320 15:43:19.349448 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:19 crc kubenswrapper[3552]: I0320 15:43:19.418720 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:20 crc kubenswrapper[3552]: I0320 15:43:20.283273 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-c64kg" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.043052 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8"] Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.043174 3552 topology_manager.go:215] "Topology Admit Handler" podUID="af734422-b4a5-4d64-8057-fef16a296420" podNamespace="openstack-operators" podName="7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: E0320 15:43:21.043330 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="9a2da8d3-efae-407e-bd00-74dc74646eb5" containerName="registry-server" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.043341 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a2da8d3-efae-407e-bd00-74dc74646eb5" containerName="registry-server" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.043477 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a2da8d3-efae-407e-bd00-74dc74646eb5" containerName="registry-server" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.044225 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.055692 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8"] Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.056337 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9rv9g" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.177417 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-util\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.177704 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtjmk\" (UniqueName: \"kubernetes.io/projected/af734422-b4a5-4d64-8057-fef16a296420-kube-api-access-rtjmk\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.177845 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-bundle\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.279182 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-bundle\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.279268 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-util\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.279297 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rtjmk\" (UniqueName: \"kubernetes.io/projected/af734422-b4a5-4d64-8057-fef16a296420-kube-api-access-rtjmk\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.279722 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-bundle\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.280102 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-util\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.298258 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtjmk\" (UniqueName: \"kubernetes.io/projected/af734422-b4a5-4d64-8057-fef16a296420-kube-api-access-rtjmk\") pod \"7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.361314 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:21 crc kubenswrapper[3552]: I0320 15:43:21.766123 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8"] Mar 20 15:43:22 crc kubenswrapper[3552]: I0320 15:43:22.244418 3552 generic.go:334] "Generic (PLEG): container finished" podID="af734422-b4a5-4d64-8057-fef16a296420" containerID="830c25f36efffd5ccf2516e907c12916de7ea447ba54a0325722d7920b3483c4" exitCode=0 Mar 20 15:43:22 crc kubenswrapper[3552]: I0320 15:43:22.244518 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" event={"ID":"af734422-b4a5-4d64-8057-fef16a296420","Type":"ContainerDied","Data":"830c25f36efffd5ccf2516e907c12916de7ea447ba54a0325722d7920b3483c4"} Mar 20 15:43:22 crc kubenswrapper[3552]: I0320 15:43:22.244797 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" event={"ID":"af734422-b4a5-4d64-8057-fef16a296420","Type":"ContainerStarted","Data":"c69988729f049e9348f3ce4fb799942a872457c776a1559c6fd28bea393a8f5d"} Mar 20 15:43:23 crc kubenswrapper[3552]: I0320 15:43:23.251457 3552 generic.go:334] "Generic (PLEG): container finished" podID="af734422-b4a5-4d64-8057-fef16a296420" containerID="dabf1f110894eb7cbb3e23bc51837f91aa1cd4b2fa9f84e49a1a3302dfba40af" exitCode=0 Mar 20 15:43:23 crc kubenswrapper[3552]: I0320 15:43:23.251801 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" event={"ID":"af734422-b4a5-4d64-8057-fef16a296420","Type":"ContainerDied","Data":"dabf1f110894eb7cbb3e23bc51837f91aa1cd4b2fa9f84e49a1a3302dfba40af"} Mar 20 15:43:24 crc kubenswrapper[3552]: I0320 15:43:24.258753 3552 generic.go:334] "Generic (PLEG): container finished" podID="af734422-b4a5-4d64-8057-fef16a296420" containerID="86237e1f01ccf27c035fa9c83390cc9502be77dd097af321f9ac530ba9455a41" exitCode=0 Mar 20 15:43:24 crc kubenswrapper[3552]: I0320 15:43:24.258800 3552 
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" event={"ID":"af734422-b4a5-4d64-8057-fef16a296420","Type":"ContainerDied","Data":"86237e1f01ccf27c035fa9c83390cc9502be77dd097af321f9ac530ba9455a41"} Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.585509 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.736842 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-bundle\") pod \"af734422-b4a5-4d64-8057-fef16a296420\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.737291 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtjmk\" (UniqueName: \"kubernetes.io/projected/af734422-b4a5-4d64-8057-fef16a296420-kube-api-access-rtjmk\") pod \"af734422-b4a5-4d64-8057-fef16a296420\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.737365 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-util\") pod \"af734422-b4a5-4d64-8057-fef16a296420\" (UID: \"af734422-b4a5-4d64-8057-fef16a296420\") " Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.737735 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-bundle" (OuterVolumeSpecName: "bundle") pod "af734422-b4a5-4d64-8057-fef16a296420" (UID: "af734422-b4a5-4d64-8057-fef16a296420"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.742777 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af734422-b4a5-4d64-8057-fef16a296420-kube-api-access-rtjmk" (OuterVolumeSpecName: "kube-api-access-rtjmk") pod "af734422-b4a5-4d64-8057-fef16a296420" (UID: "af734422-b4a5-4d64-8057-fef16a296420"). InnerVolumeSpecName "kube-api-access-rtjmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.750973 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-util" (OuterVolumeSpecName: "util") pod "af734422-b4a5-4d64-8057-fef16a296420" (UID: "af734422-b4a5-4d64-8057-fef16a296420"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.838455 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rtjmk\" (UniqueName: \"kubernetes.io/projected/af734422-b4a5-4d64-8057-fef16a296420-kube-api-access-rtjmk\") on node \"crc\" DevicePath \"\"" Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.838486 3552 reconciler_common.go:300] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-util\") on node \"crc\" DevicePath \"\"" Mar 20 15:43:25 crc kubenswrapper[3552]: I0320 15:43:25.838508 3552 reconciler_common.go:300] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/af734422-b4a5-4d64-8057-fef16a296420-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:43:26 crc kubenswrapper[3552]: I0320 15:43:26.272609 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" event={"ID":"af734422-b4a5-4d64-8057-fef16a296420","Type":"ContainerDied","Data":"c69988729f049e9348f3ce4fb799942a872457c776a1559c6fd28bea393a8f5d"} Mar 20 15:43:26 crc kubenswrapper[3552]: I0320 15:43:26.272682 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c69988729f049e9348f3ce4fb799942a872457c776a1559c6fd28bea393a8f5d" Mar 20 15:43:26 crc kubenswrapper[3552]: I0320 15:43:26.272681 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.156923 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6"] Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.159572 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd" podNamespace="openstack-operators" podName="openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:33 crc kubenswrapper[3552]: E0320 15:43:33.159792 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="util" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.159807 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="util" Mar 20 15:43:33 crc kubenswrapper[3552]: E0320 15:43:33.159824 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="pull" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.159832 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="pull" Mar 20 15:43:33 crc kubenswrapper[3552]: E0320 15:43:33.159847 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="extract" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.159854 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="extract" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.159977 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="af734422-b4a5-4d64-8057-fef16a296420" containerName="extract" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.160517 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.163291 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-jt9pv" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.192616 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6"] Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.236111 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8jzx\" (UniqueName: \"kubernetes.io/projected/9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd-kube-api-access-v8jzx\") pod \"openstack-operator-controller-init-646b9cdcdc-9z4d6\" (UID: \"9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd\") " pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.337109 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-v8jzx\" (UniqueName: \"kubernetes.io/projected/9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd-kube-api-access-v8jzx\") pod \"openstack-operator-controller-init-646b9cdcdc-9z4d6\" (UID: \"9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd\") " pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.357127 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8jzx\" (UniqueName: \"kubernetes.io/projected/9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd-kube-api-access-v8jzx\") pod \"openstack-operator-controller-init-646b9cdcdc-9z4d6\" (UID: \"9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd\") " pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.478851 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:33 crc kubenswrapper[3552]: I0320 15:43:33.809986 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6"] Mar 20 15:43:34 crc kubenswrapper[3552]: I0320 15:43:34.318708 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" event={"ID":"9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd","Type":"ContainerStarted","Data":"c92058e8043bb83a2ea9f3474f959706035dc7225601b9741bc702f2600eb7bc"} Mar 20 15:43:41 crc kubenswrapper[3552]: I0320 15:43:41.355444 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" event={"ID":"9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd","Type":"ContainerStarted","Data":"16471f9d8f93595e1d8890cb6e8ce0b85fa7c778929e9d8d04e8e8736cbb9740"} Mar 20 15:43:41 crc kubenswrapper[3552]: I0320 15:43:41.356090 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:43:41 crc kubenswrapper[3552]: I0320 15:43:41.383671 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" podStartSLOduration=1.94005378 podStartE2EDuration="8.383619218s" podCreationTimestamp="2026-03-20 15:43:33 +0000 UTC" firstStartedPulling="2026-03-20 15:43:33.822042693 +0000 UTC m=+1113.515739523" lastFinishedPulling="2026-03-20 15:43:40.265608121 +0000 UTC m=+1119.959304961" observedRunningTime="2026-03-20 15:43:41.380348721 +0000 UTC m=+1121.074045571" watchObservedRunningTime="2026-03-20 15:43:41.383619218 +0000 UTC m=+1121.077316048" Mar 20 15:43:53 crc kubenswrapper[3552]: I0320 15:43:53.481754 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-646b9cdcdc-9z4d6" Mar 20 15:44:01 crc kubenswrapper[3552]: I0320 15:44:01.300029 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:44:01 crc kubenswrapper[3552]: I0320 15:44:01.300364 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:44:01 crc kubenswrapper[3552]: I0320 15:44:01.300393 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:44:01 crc kubenswrapper[3552]: I0320 15:44:01.300436 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:44:01 crc kubenswrapper[3552]: I0320 15:44:01.300471 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.256852 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.257357 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cba9b971-9846-434b-be0f-c49675291fc6" podNamespace="openstack-operators" podName="barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.258050 3552 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.266383 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5666895465-zvh67"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.266513 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4ad6253a-a3ac-4428-a1e0-4059ca5b02f5" podNamespace="openstack-operators" podName="cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.267121 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.267472 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-lf4q7" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.270446 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-d447m" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.274907 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.280675 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.280803 3552 topology_manager.go:215] "Topology Admit Handler" podUID="354e53f6-1fa0-4b98-8a19-741caf032b5a" podNamespace="openstack-operators" podName="glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.281603 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.286302 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-fzf5n" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.288542 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.288646 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4ecd96e3-ccef-44d8-b099-803a81f0d35d" podNamespace="openstack-operators" podName="designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.289232 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.292262 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-rvx5j" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.303862 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.320878 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.330518 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5666895465-zvh67"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.342474 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.342606 3552 topology_manager.go:215] "Topology Admit Handler" podUID="105486bf-4db8-4e8b-b7e6-b36f272e8042" podNamespace="openstack-operators" podName="heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.342985 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj52s\" (UniqueName: \"kubernetes.io/projected/4ecd96e3-ccef-44d8-b099-803a81f0d35d-kube-api-access-fj52s\") pod \"designate-operator-controller-manager-674dd8bdd9-pvjxb\" (UID: \"4ecd96e3-ccef-44d8-b099-803a81f0d35d\") " pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.343048 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klwxg\" (UniqueName: \"kubernetes.io/projected/354e53f6-1fa0-4b98-8a19-741caf032b5a-kube-api-access-klwxg\") pod \"glance-operator-controller-manager-f8f7f758d-r5lmz\" (UID: \"354e53f6-1fa0-4b98-8a19-741caf032b5a\") " pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.343078 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqfz2\" (UniqueName: \"kubernetes.io/projected/4ad6253a-a3ac-4428-a1e0-4059ca5b02f5-kube-api-access-xqfz2\") pod \"cinder-operator-controller-manager-5666895465-zvh67\" (UID: \"4ad6253a-a3ac-4428-a1e0-4059ca5b02f5\") " pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.343117 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbxwf\" (UniqueName: \"kubernetes.io/projected/cba9b971-9846-434b-be0f-c49675291fc6-kube-api-access-bbxwf\") pod \"barbican-operator-controller-manager-85887bf46b-ddjrz\" (UID: \"cba9b971-9846-434b-be0f-c49675291fc6\") " pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.343289 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.349342 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-wlcp7" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.353821 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.353972 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ce6883bb-9cc0-494d-acc0-28ab4cb8ba66" podNamespace="openstack-operators" podName="horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.354730 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.358626 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-lctqt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.361005 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.369858 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.395158 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.395291 3552 topology_manager.go:215] "Topology Admit Handler" podUID="85fc9bb8-3e61-4a3f-bc9a-54c327b0b278" podNamespace="openstack-operators" podName="infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.396015 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.400493 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qwgnk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.400673 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.409338 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.423476 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.423614 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4e980880-d1a1-40dc-9a05-394a72cfb983" podNamespace="openstack-operators" podName="ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.424290 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.428919 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-7pb7b" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445154 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq6fz\" (UniqueName: \"kubernetes.io/projected/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-kube-api-access-gq6fz\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445229 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fj52s\" (UniqueName: \"kubernetes.io/projected/4ecd96e3-ccef-44d8-b099-803a81f0d35d-kube-api-access-fj52s\") pod \"designate-operator-controller-manager-674dd8bdd9-pvjxb\" (UID: \"4ecd96e3-ccef-44d8-b099-803a81f0d35d\") " pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445263 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9l9g\" (UniqueName: \"kubernetes.io/projected/ce6883bb-9cc0-494d-acc0-28ab4cb8ba66-kube-api-access-h9l9g\") pod \"horizon-operator-controller-manager-6f77bd5775-9s4lh\" (UID: \"ce6883bb-9cc0-494d-acc0-28ab4cb8ba66\") " pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445297 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-klwxg\" (UniqueName: \"kubernetes.io/projected/354e53f6-1fa0-4b98-8a19-741caf032b5a-kube-api-access-klwxg\") pod \"glance-operator-controller-manager-f8f7f758d-r5lmz\" (UID: \"354e53f6-1fa0-4b98-8a19-741caf032b5a\") " pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445325 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hgtq\" (UniqueName: \"kubernetes.io/projected/4e980880-d1a1-40dc-9a05-394a72cfb983-kube-api-access-4hgtq\") pod \"ironic-operator-controller-manager-dcb9f85b6-xjgbk\" (UID: \"4e980880-d1a1-40dc-9a05-394a72cfb983\") " pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445361 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xqfz2\" (UniqueName: \"kubernetes.io/projected/4ad6253a-a3ac-4428-a1e0-4059ca5b02f5-kube-api-access-xqfz2\") pod \"cinder-operator-controller-manager-5666895465-zvh67\" (UID: \"4ad6253a-a3ac-4428-a1e0-4059ca5b02f5\") " pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445424 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klgvg\" (UniqueName: \"kubernetes.io/projected/105486bf-4db8-4e8b-b7e6-b36f272e8042-kube-api-access-klgvg\") pod \"heat-operator-controller-manager-5dfddf8d94-vwkxd\" (UID: \"105486bf-4db8-4e8b-b7e6-b36f272e8042\") " 
pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445465 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.445502 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bbxwf\" (UniqueName: \"kubernetes.io/projected/cba9b971-9846-434b-be0f-c49675291fc6-kube-api-access-bbxwf\") pod \"barbican-operator-controller-manager-85887bf46b-ddjrz\" (UID: \"cba9b971-9846-434b-be0f-c49675291fc6\") " pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.478596 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.478695 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c64483e3-8ac2-4242-9a5e-85839f40cb42" podNamespace="openstack-operators" podName="keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.480724 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqfz2\" (UniqueName: \"kubernetes.io/projected/4ad6253a-a3ac-4428-a1e0-4059ca5b02f5-kube-api-access-xqfz2\") pod \"cinder-operator-controller-manager-5666895465-zvh67\" (UID: \"4ad6253a-a3ac-4428-a1e0-4059ca5b02f5\") " pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.480749 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj52s\" (UniqueName: \"kubernetes.io/projected/4ecd96e3-ccef-44d8-b099-803a81f0d35d-kube-api-access-fj52s\") pod \"designate-operator-controller-manager-674dd8bdd9-pvjxb\" (UID: \"4ecd96e3-ccef-44d8-b099-803a81f0d35d\") " pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.482052 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.483172 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbxwf\" (UniqueName: \"kubernetes.io/projected/cba9b971-9846-434b-be0f-c49675291fc6-kube-api-access-bbxwf\") pod \"barbican-operator-controller-manager-85887bf46b-ddjrz\" (UID: \"cba9b971-9846-434b-be0f-c49675291fc6\") " pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.487543 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-lvl5p" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.494282 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-klwxg\" (UniqueName: \"kubernetes.io/projected/354e53f6-1fa0-4b98-8a19-741caf032b5a-kube-api-access-klwxg\") pod \"glance-operator-controller-manager-f8f7f758d-r5lmz\" (UID: \"354e53f6-1fa0-4b98-8a19-741caf032b5a\") " pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.511583 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.671099 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.672047 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.672611 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-klgvg\" (UniqueName: \"kubernetes.io/projected/105486bf-4db8-4e8b-b7e6-b36f272e8042-kube-api-access-klgvg\") pod \"heat-operator-controller-manager-5dfddf8d94-vwkxd\" (UID: \"105486bf-4db8-4e8b-b7e6-b36f272e8042\") " pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.672717 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.672815 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gq6fz\" (UniqueName: \"kubernetes.io/projected/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-kube-api-access-gq6fz\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.672895 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-h9l9g\" (UniqueName: \"kubernetes.io/projected/ce6883bb-9cc0-494d-acc0-28ab4cb8ba66-kube-api-access-h9l9g\") pod \"horizon-operator-controller-manager-6f77bd5775-9s4lh\" (UID: 
\"ce6883bb-9cc0-494d-acc0-28ab4cb8ba66\") " pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.672941 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4hgtq\" (UniqueName: \"kubernetes.io/projected/4e980880-d1a1-40dc-9a05-394a72cfb983-kube-api-access-4hgtq\") pod \"ironic-operator-controller-manager-dcb9f85b6-xjgbk\" (UID: \"4e980880-d1a1-40dc-9a05-394a72cfb983\") " pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:13 crc kubenswrapper[3552]: E0320 15:44:13.673373 3552 secret.go:194] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.692658 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.707968 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:13 crc kubenswrapper[3552]: E0320 15:44:13.708134 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert podName:85fc9bb8-3e61-4a3f-bc9a-54c327b0b278 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:14.20810882 +0000 UTC m=+1153.901805650 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert") pod "infra-operator-controller-manager-b68b4cfdf-h8ljt" (UID: "85fc9bb8-3e61-4a3f-bc9a-54c327b0b278") : secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.707965 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.708380 3552 topology_manager.go:215] "Topology Admit Handler" podUID="951c8176-4fe8-41d9-8bea-c6bf299dec7c" podNamespace="openstack-operators" podName="manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.709237 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.721796 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.724317 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-h76lp" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.726539 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hgtq\" (UniqueName: \"kubernetes.io/projected/4e980880-d1a1-40dc-9a05-394a72cfb983-kube-api-access-4hgtq\") pod \"ironic-operator-controller-manager-dcb9f85b6-xjgbk\" (UID: \"4e980880-d1a1-40dc-9a05-394a72cfb983\") " pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.747658 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9l9g\" (UniqueName: \"kubernetes.io/projected/ce6883bb-9cc0-494d-acc0-28ab4cb8ba66-kube-api-access-h9l9g\") pod \"horizon-operator-controller-manager-6f77bd5775-9s4lh\" (UID: \"ce6883bb-9cc0-494d-acc0-28ab4cb8ba66\") " pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.748451 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.766812 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.766958 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a9eee008-53da-425f-bb6e-7a39e6b07754" podNamespace="openstack-operators" podName="neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.767744 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.773676 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-jd2fj" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.778681 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfrlf\" (UniqueName: \"kubernetes.io/projected/c64483e3-8ac2-4242-9a5e-85839f40cb42-kube-api-access-tfrlf\") pod \"keystone-operator-controller-manager-854bd9766b-t7rzd\" (UID: \"c64483e3-8ac2-4242-9a5e-85839f40cb42\") " pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.779938 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.789487 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.789611 3552 topology_manager.go:215] "Topology Admit Handler" podUID="f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1" podNamespace="openstack-operators" podName="mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.790386 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.801889 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-xk98h" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.802067 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.802092 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.807989 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.808131 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e288b597-0ccf-4abc-9f7a-634d63242553" podNamespace="openstack-operators" podName="nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.809006 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.813517 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-brkdw" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.814227 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.828068 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.828215 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b808d486-c6e9-4167-92f4-7c854ead72f7" podNamespace="openstack-operators" podName="octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.829103 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.833798 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.833927 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6620147f-34ac-4892-9c3c-7886a2bd6558" podNamespace="openstack-operators" podName="openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.833958 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-wfn6r" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.841920 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.845203 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.845458 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-d5fmn" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.848414 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.848504 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b1a25fa6-822d-4376-8721-bd7802437838" podNamespace="openstack-operators" podName="ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.849524 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.852163 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-v4zgh" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.857316 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.857437 3552 topology_manager.go:215] "Topology Admit Handler" podUID="71432b9e-c028-469c-904e-9a74b0ffa5ec" podNamespace="openstack-operators" podName="placement-operator-controller-manager-666c4df8d6-k4pgg" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.858091 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.877326 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.877525 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9ad61c28-5162-4ceb-b703-79e0f4d20a43" podNamespace="openstack-operators" podName="swift-operator-controller-manager-5fccf6868d-8zrxq" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.878628 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j"] Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.878714 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b672dc41-4a65-47db-aeeb-d9858305445e" podNamespace="openstack-operators" podName="telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.879445 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.879905 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.880759 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tfrlf\" (UniqueName: \"kubernetes.io/projected/c64483e3-8ac2-4242-9a5e-85839f40cb42-kube-api-access-tfrlf\") pod \"keystone-operator-controller-manager-854bd9766b-t7rzd\" (UID: \"c64483e3-8ac2-4242-9a5e-85839f40cb42\") " pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.880807 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfjq5\" (UniqueName: \"kubernetes.io/projected/f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1-kube-api-access-cfjq5\") pod \"mariadb-operator-controller-manager-6ff8887465-j5znl\" (UID: \"f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1\") " pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.880859 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jls5w\" (UniqueName: \"kubernetes.io/projected/a9eee008-53da-425f-bb6e-7a39e6b07754-kube-api-access-jls5w\") pod \"neutron-operator-controller-manager-c9c4d5ccf-zqzbw\" (UID: \"a9eee008-53da-425f-bb6e-7a39e6b07754\") " pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.880921 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztkkn\" (UniqueName: \"kubernetes.io/projected/951c8176-4fe8-41d9-8bea-c6bf299dec7c-kube-api-access-ztkkn\") pod \"manila-operator-controller-manager-75d675bfc4-mlclf\" (UID: \"951c8176-4fe8-41d9-8bea-c6bf299dec7c\") " pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.881004 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh7m8\" (UniqueName: \"kubernetes.io/projected/e288b597-0ccf-4abc-9f7a-634d63242553-kube-api-access-sh7m8\") pod \"nova-operator-controller-manager-5475cc9b67-clc46\" (UID: \"e288b597-0ccf-4abc-9f7a-634d63242553\") " pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.900725 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-qd947" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.901085 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-4ls79" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.901244 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-zbgmj" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982044 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982114 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q5x9\" (UniqueName: \"kubernetes.io/projected/b672dc41-4a65-47db-aeeb-d9858305445e-kube-api-access-2q5x9\") pod \"telemetry-operator-controller-manager-c9f654fc4-lxs7j\" (UID: \"b672dc41-4a65-47db-aeeb-d9858305445e\") " pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982141 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-jls5w\" (UniqueName: \"kubernetes.io/projected/a9eee008-53da-425f-bb6e-7a39e6b07754-kube-api-access-jls5w\") pod \"neutron-operator-controller-manager-c9c4d5ccf-zqzbw\" (UID: \"a9eee008-53da-425f-bb6e-7a39e6b07754\") " pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982177 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxsks\" (UniqueName: \"kubernetes.io/projected/b1a25fa6-822d-4376-8721-bd7802437838-kube-api-access-vxsks\") pod \"ovn-operator-controller-manager-6dd5cfd685-sv9dk\" (UID: \"b1a25fa6-822d-4376-8721-bd7802437838\") " pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982205 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbttp\" (UniqueName: \"kubernetes.io/projected/b808d486-c6e9-4167-92f4-7c854ead72f7-kube-api-access-zbttp\") pod \"octavia-operator-controller-manager-7598758bfb-fd8f7\" (UID: \"b808d486-c6e9-4167-92f4-7c854ead72f7\") " pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982237 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6wb8\" (UniqueName: \"kubernetes.io/projected/9ad61c28-5162-4ceb-b703-79e0f4d20a43-kube-api-access-n6wb8\") pod \"swift-operator-controller-manager-5fccf6868d-8zrxq\" (UID: \"9ad61c28-5162-4ceb-b703-79e0f4d20a43\") " pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982262 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk6ds\" (UniqueName: \"kubernetes.io/projected/71432b9e-c028-469c-904e-9a74b0ffa5ec-kube-api-access-hk6ds\") pod \"placement-operator-controller-manager-666c4df8d6-k4pgg\" (UID: \"71432b9e-c028-469c-904e-9a74b0ffa5ec\") " pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982290 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ztkkn\" (UniqueName: \"kubernetes.io/projected/951c8176-4fe8-41d9-8bea-c6bf299dec7c-kube-api-access-ztkkn\") pod \"manila-operator-controller-manager-75d675bfc4-mlclf\" (UID: \"951c8176-4fe8-41d9-8bea-c6bf299dec7c\") " pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982317 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-m6mxx\" (UniqueName: \"kubernetes.io/projected/6620147f-34ac-4892-9c3c-7886a2bd6558-kube-api-access-m6mxx\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sh7m8\" (UniqueName: \"kubernetes.io/projected/e288b597-0ccf-4abc-9f7a-634d63242553-kube-api-access-sh7m8\") pod \"nova-operator-controller-manager-5475cc9b67-clc46\" (UID: \"e288b597-0ccf-4abc-9f7a-634d63242553\") " pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.982454 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-cfjq5\" (UniqueName: \"kubernetes.io/projected/f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1-kube-api-access-cfjq5\") pod \"mariadb-operator-controller-manager-6ff8887465-j5znl\" (UID: \"f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1\") " pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:13 crc kubenswrapper[3552]: I0320 15:44:13.994592 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.009559 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfjq5\" (UniqueName: \"kubernetes.io/projected/f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1-kube-api-access-cfjq5\") pod \"mariadb-operator-controller-manager-6ff8887465-j5znl\" (UID: \"f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1\") " pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.011300 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-klgvg\" (UniqueName: \"kubernetes.io/projected/105486bf-4db8-4e8b-b7e6-b36f272e8042-kube-api-access-klgvg\") pod \"heat-operator-controller-manager-5dfddf8d94-vwkxd\" (UID: \"105486bf-4db8-4e8b-b7e6-b36f272e8042\") " pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.016715 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq6fz\" (UniqueName: \"kubernetes.io/projected/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-kube-api-access-gq6fz\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.034917 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-jls5w\" (UniqueName: \"kubernetes.io/projected/a9eee008-53da-425f-bb6e-7a39e6b07754-kube-api-access-jls5w\") pod \"neutron-operator-controller-manager-c9c4d5ccf-zqzbw\" (UID: \"a9eee008-53da-425f-bb6e-7a39e6b07754\") " pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.042590 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.042916 3552 
topology_manager.go:215] "Topology Admit Handler" podUID="42323066-562e-46fc-8616-3d244ae35b2d" podNamespace="openstack-operators" podName="test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.043713 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.046298 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.046430 3552 topology_manager.go:215] "Topology Admit Handler" podUID="f4aee2da-fb97-4e64-a4d1-223cf14816e8" podNamespace="openstack-operators" podName="watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.047068 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.049248 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztkkn\" (UniqueName: \"kubernetes.io/projected/951c8176-4fe8-41d9-8bea-c6bf299dec7c-kube-api-access-ztkkn\") pod \"manila-operator-controller-manager-75d675bfc4-mlclf\" (UID: \"951c8176-4fe8-41d9-8bea-c6bf299dec7c\") " pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.051066 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfrlf\" (UniqueName: \"kubernetes.io/projected/c64483e3-8ac2-4242-9a5e-85839f40cb42-kube-api-access-tfrlf\") pod \"keystone-operator-controller-manager-854bd9766b-t7rzd\" (UID: \"c64483e3-8ac2-4242-9a5e-85839f40cb42\") " pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.054395 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-4k7b7" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.054665 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-vzwgg" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.057555 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh7m8\" (UniqueName: \"kubernetes.io/projected/e288b597-0ccf-4abc-9f7a-634d63242553-kube-api-access-sh7m8\") pod \"nova-operator-controller-manager-5475cc9b67-clc46\" (UID: \"e288b597-0ccf-4abc-9f7a-634d63242553\") " pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.066219 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.082979 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.084379 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-m6mxx\" (UniqueName: \"kubernetes.io/projected/6620147f-34ac-4892-9c3c-7886a2bd6558-kube-api-access-m6mxx\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.087295 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.087360 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2q5x9\" (UniqueName: \"kubernetes.io/projected/b672dc41-4a65-47db-aeeb-d9858305445e-kube-api-access-2q5x9\") pod \"telemetry-operator-controller-manager-c9f654fc4-lxs7j\" (UID: \"b672dc41-4a65-47db-aeeb-d9858305445e\") " pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.088139 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vxsks\" (UniqueName: \"kubernetes.io/projected/b1a25fa6-822d-4376-8721-bd7802437838-kube-api-access-vxsks\") pod \"ovn-operator-controller-manager-6dd5cfd685-sv9dk\" (UID: \"b1a25fa6-822d-4376-8721-bd7802437838\") " pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.088199 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zbttp\" (UniqueName: \"kubernetes.io/projected/b808d486-c6e9-4167-92f4-7c854ead72f7-kube-api-access-zbttp\") pod \"octavia-operator-controller-manager-7598758bfb-fd8f7\" (UID: \"b808d486-c6e9-4167-92f4-7c854ead72f7\") " pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.088287 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n6wb8\" (UniqueName: \"kubernetes.io/projected/9ad61c28-5162-4ceb-b703-79e0f4d20a43-kube-api-access-n6wb8\") pod \"swift-operator-controller-manager-5fccf6868d-8zrxq\" (UID: \"9ad61c28-5162-4ceb-b703-79e0f4d20a43\") " pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.088328 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hk6ds\" (UniqueName: \"kubernetes.io/projected/71432b9e-c028-469c-904e-9a74b0ffa5ec-kube-api-access-hk6ds\") pod \"placement-operator-controller-manager-666c4df8d6-k4pgg\" (UID: \"71432b9e-c028-469c-904e-9a74b0ffa5ec\") " pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.084545 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq"] Mar 20 15:44:14 crc 
Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.088985 3552 secret.go:194] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.089030 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert podName:6620147f-34ac-4892-9c3c-7886a2bd6558 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:14.589014109 +0000 UTC m=+1154.282710939 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert") pod "openstack-baremetal-operator-controller-manager-6557559db7m9cdd" (UID: "6620147f-34ac-4892-9c3c-7886a2bd6558") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.100725 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7"]
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.102170 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46"
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.115592 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk6ds\" (UniqueName: \"kubernetes.io/projected/71432b9e-c028-469c-904e-9a74b0ffa5ec-kube-api-access-hk6ds\") pod \"placement-operator-controller-manager-666c4df8d6-k4pgg\" (UID: \"71432b9e-c028-469c-904e-9a74b0ffa5ec\") " pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg"
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.119326 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6mxx\" (UniqueName: \"kubernetes.io/projected/6620147f-34ac-4892-9c3c-7886a2bd6558-kube-api-access-m6mxx\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd"
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.122820 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6wb8\" (UniqueName: \"kubernetes.io/projected/9ad61c28-5162-4ceb-b703-79e0f4d20a43-kube-api-access-n6wb8\") pod \"swift-operator-controller-manager-5fccf6868d-8zrxq\" (UID: \"9ad61c28-5162-4ceb-b703-79e0f4d20a43\") " pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq"
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.123331 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q5x9\" (UniqueName: \"kubernetes.io/projected/b672dc41-4a65-47db-aeeb-d9858305445e-kube-api-access-2q5x9\") pod \"telemetry-operator-controller-manager-c9f654fc4-lxs7j\" (UID: \"b672dc41-4a65-47db-aeeb-d9858305445e\") " pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j"
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.126242 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45"]
Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.127940 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxsks\" (UniqueName:
\"kubernetes.io/projected/b1a25fa6-822d-4376-8721-bd7802437838-kube-api-access-vxsks\") pod \"ovn-operator-controller-manager-6dd5cfd685-sv9dk\" (UID: \"b1a25fa6-822d-4376-8721-bd7802437838\") " pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.132591 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbttp\" (UniqueName: \"kubernetes.io/projected/b808d486-c6e9-4167-92f4-7c854ead72f7-kube-api-access-zbttp\") pod \"octavia-operator-controller-manager-7598758bfb-fd8f7\" (UID: \"b808d486-c6e9-4167-92f4-7c854ead72f7\") " pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.138131 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.148780 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.151745 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.157168 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.184210 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.194516 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.195655 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg94n\" (UniqueName: \"kubernetes.io/projected/42323066-562e-46fc-8616-3d244ae35b2d-kube-api-access-hg94n\") pod \"test-operator-controller-manager-665dbccdfd-t2b8h\" (UID: \"42323066-562e-46fc-8616-3d244ae35b2d\") " pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.196084 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj6nz\" (UniqueName: \"kubernetes.io/projected/f4aee2da-fb97-4e64-a4d1-223cf14816e8-kube-api-access-sj6nz\") pod \"watcher-operator-controller-manager-6dc7cb7d75-5cl45\" (UID: \"f4aee2da-fb97-4e64-a4d1-223cf14816e8\") " pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.196261 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.205822 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.231550 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.266169 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.280109 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.297137 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.297194 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sj6nz\" (UniqueName: \"kubernetes.io/projected/f4aee2da-fb97-4e64-a4d1-223cf14816e8-kube-api-access-sj6nz\") pod \"watcher-operator-controller-manager-6dc7cb7d75-5cl45\" (UID: \"f4aee2da-fb97-4e64-a4d1-223cf14816e8\") " pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.297227 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hg94n\" (UniqueName: \"kubernetes.io/projected/42323066-562e-46fc-8616-3d244ae35b2d-kube-api-access-hg94n\") pod \"test-operator-controller-manager-665dbccdfd-t2b8h\" (UID: \"42323066-562e-46fc-8616-3d244ae35b2d\") " pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.298808 3552 secret.go:194] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.298844 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert podName:85fc9bb8-3e61-4a3f-bc9a-54c327b0b278 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:15.298832442 +0000 UTC m=+1154.992529272 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert") pod "infra-operator-controller-manager-b68b4cfdf-h8ljt" (UID: "85fc9bb8-3e61-4a3f-bc9a-54c327b0b278") : secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.299204 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.299305 3552 topology_manager.go:215] "Topology Admit Handler" podUID="145a49ae-4a3b-4096-8fdb-974a93c8194a" podNamespace="openstack-operators" podName="openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.302749 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.304850 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.305478 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.310152 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.349294 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-vcwqs" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.370204 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.378205 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj6nz\" (UniqueName: \"kubernetes.io/projected/f4aee2da-fb97-4e64-a4d1-223cf14816e8-kube-api-access-sj6nz\") pod \"watcher-operator-controller-manager-6dc7cb7d75-5cl45\" (UID: \"f4aee2da-fb97-4e64-a4d1-223cf14816e8\") " pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.380450 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.380608 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b9502382-d33a-43df-bbfa-f4462e6c426a" podNamespace="openstack-operators" podName="rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.381379 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.382977 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-stdvj" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.383745 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg94n\" (UniqueName: \"kubernetes.io/projected/42323066-562e-46fc-8616-3d244ae35b2d-kube-api-access-hg94n\") pod \"test-operator-controller-manager-665dbccdfd-t2b8h\" (UID: \"42323066-562e-46fc-8616-3d244ae35b2d\") " pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.386385 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.388704 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.400721 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.400783 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.400814 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f78wj\" (UniqueName: \"kubernetes.io/projected/145a49ae-4a3b-4096-8fdb-974a93c8194a-kube-api-access-f78wj\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.442734 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.504370 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.504437 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.504469 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f78wj\" (UniqueName: \"kubernetes.io/projected/145a49ae-4a3b-4096-8fdb-974a93c8194a-kube-api-access-f78wj\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.504508 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmqwm\" (UniqueName: \"kubernetes.io/projected/b9502382-d33a-43df-bbfa-f4462e6c426a-kube-api-access-lmqwm\") pod \"rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf\" (UID: \"b9502382-d33a-43df-bbfa-f4462e6c426a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" 
Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.504993 3552 secret.go:194] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.505032 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:15.005018867 +0000 UTC m=+1154.698715697 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "webhook-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.505242 3552 secret.go:194] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.505276 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:15.005268664 +0000 UTC m=+1154.698965494 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "metrics-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.605897 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.606232 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lmqwm\" (UniqueName: \"kubernetes.io/projected/b9502382-d33a-43df-bbfa-f4462e6c426a-kube-api-access-lmqwm\") pod \"rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf\" (UID: \"b9502382-d33a-43df-bbfa-f4462e6c426a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.606580 3552 secret.go:194] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: E0320 15:44:14.606620 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert podName:6620147f-34ac-4892-9c3c-7886a2bd6558 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:15.606606754 +0000 UTC m=+1155.300303584 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert") pod "openstack-baremetal-operator-controller-manager-6557559db7m9cdd" (UID: "6620147f-34ac-4892-9c3c-7886a2bd6558") : secret "openstack-baremetal-operator-webhook-server-cert" not found Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.617628 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.636661 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f78wj\" (UniqueName: \"kubernetes.io/projected/145a49ae-4a3b-4096-8fdb-974a93c8194a-kube-api-access-f78wj\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.647081 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmqwm\" (UniqueName: \"kubernetes.io/projected/b9502382-d33a-43df-bbfa-f4462e6c426a-kube-api-access-lmqwm\") pod \"rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf\" (UID: \"b9502382-d33a-43df-bbfa-f4462e6c426a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.698678 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb"] Mar 20 15:44:14 crc kubenswrapper[3552]: I0320 15:44:14.730582 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" Mar 20 15:44:14 crc kubenswrapper[3552]: W0320 15:44:14.743092 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ecd96e3_ccef_44d8_b099_803a81f0d35d.slice/crio-327d050059c5fa48ea8efb80a7f8183226ca9ea0adc6a446089b6851e75bccc7 WatchSource:0}: Error finding container 327d050059c5fa48ea8efb80a7f8183226ca9ea0adc6a446089b6851e75bccc7: Status 404 returned error can't find the container with id 327d050059c5fa48ea8efb80a7f8183226ca9ea0adc6a446089b6851e75bccc7 Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.018286 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.018585 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.018717 3552 secret.go:194] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.019000 3552 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:16.018985476 +0000 UTC m=+1155.712682306 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "metrics-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.019098 3552 secret.go:194] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.019122 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:16.019116049 +0000 UTC m=+1155.712812879 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "webhook-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.081523 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5666895465-zvh67"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.118755 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" event={"ID":"4ad6253a-a3ac-4428-a1e0-4059ca5b02f5","Type":"ContainerStarted","Data":"dd96b2a2aef8abeb57210a14d6a085ad1b82f15a78b2cd31d791942e8683c95f"} Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.120632 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" event={"ID":"4ecd96e3-ccef-44d8-b099-803a81f0d35d","Type":"ContainerStarted","Data":"327d050059c5fa48ea8efb80a7f8183226ca9ea0adc6a446089b6851e75bccc7"} Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.321871 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.322068 3552 secret.go:194] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.322136 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert podName:85fc9bb8-3e61-4a3f-bc9a-54c327b0b278 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:17.322114085 +0000 UTC m=+1157.015810935 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert") pod "infra-operator-controller-manager-b68b4cfdf-h8ljt" (UID: "85fc9bb8-3e61-4a3f-bc9a-54c327b0b278") : secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.572658 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.584503 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.598729 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.621599 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg"] Mar 20 15:44:15 crc kubenswrapper[3552]: W0320 15:44:15.622232 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71432b9e_c028_469c_904e_9a74b0ffa5ec.slice/crio-a7c156b7689f4a7cafc1e293bdbdff2904b46c958a7b519227f8fa20d24f7c3a WatchSource:0}: Error finding container a7c156b7689f4a7cafc1e293bdbdff2904b46c958a7b519227f8fa20d24f7c3a: Status 404 returned error can't find the container with id a7c156b7689f4a7cafc1e293bdbdff2904b46c958a7b519227f8fa20d24f7c3a Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.627251 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.628584 3552 secret.go:194] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: E0320 15:44:15.629468 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert podName:6620147f-34ac-4892-9c3c-7886a2bd6558 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:17.629451656 +0000 UTC m=+1157.323148486 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert") pod "openstack-baremetal-operator-controller-manager-6557559db7m9cdd" (UID: "6620147f-34ac-4892-9c3c-7886a2bd6558") : secret "openstack-baremetal-operator-webhook-server-cert" not found Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.635662 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz"] Mar 20 15:44:15 crc kubenswrapper[3552]: W0320 15:44:15.637534 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcba9b971_9846_434b_be0f_c49675291fc6.slice/crio-8450f99d2fe7b63e8c94a7a21542dbffbd0cadb844db74b608bc1b0befe5512d WatchSource:0}: Error finding container 8450f99d2fe7b63e8c94a7a21542dbffbd0cadb844db74b608bc1b0befe5512d: Status 404 returned error can't find the container with id 8450f99d2fe7b63e8c94a7a21542dbffbd0cadb844db74b608bc1b0befe5512d Mar 20 15:44:15 crc kubenswrapper[3552]: W0320 15:44:15.641839 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod951c8176_4fe8_41d9_8bea_c6bf299dec7c.slice/crio-385ba765d22108afc8864a82a8d76d0526017293b4f49b52620fa3b87ea7f1de WatchSource:0}: Error finding container 385ba765d22108afc8864a82a8d76d0526017293b4f49b52620fa3b87ea7f1de: Status 404 returned error can't find the container with id 385ba765d22108afc8864a82a8d76d0526017293b4f49b52620fa3b87ea7f1de Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.642325 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd"] Mar 20 15:44:15 crc kubenswrapper[3552]: W0320 15:44:15.645224 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod105486bf_4db8_4e8b_b7e6_b36f272e8042.slice/crio-828689ada72963a88370e720e8a279366f5535c73f07ff62f3509395ba3ed586 WatchSource:0}: Error finding container 828689ada72963a88370e720e8a279366f5535c73f07ff62f3509395ba3ed586: Status 404 returned error can't find the container with id 828689ada72963a88370e720e8a279366f5535c73f07ff62f3509395ba3ed586 Mar 20 15:44:15 crc kubenswrapper[3552]: W0320 15:44:15.646170 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5ec1a5f_b0ee_4476_9347_d5e3244b1ea1.slice/crio-0377b016209985b2f652aab072a21f91814a4ef38232fd3c46e0360c931fce00 WatchSource:0}: Error finding container 0377b016209985b2f652aab072a21f91814a4ef38232fd3c46e0360c931fce00: Status 404 returned error can't find the container with id 0377b016209985b2f652aab072a21f91814a4ef38232fd3c46e0360c931fce00 Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.648431 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.657437 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.661149 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl"] Mar 20 15:44:15 crc kubenswrapper[3552]: I0320 15:44:15.991278 3552 kubelet.go:2436] "SyncLoop 
UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j"] Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.037145 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.037216 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.037366 3552 secret.go:194] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.037434 3552 secret.go:194] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.037472 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:18.0374539 +0000 UTC m=+1157.731150730 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "webhook-server-cert" not found Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.037498 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:18.037479941 +0000 UTC m=+1157.731176771 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "metrics-server-cert" not found Mar 20 15:44:16 crc kubenswrapper[3552]: W0320 15:44:16.037602 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42323066_562e_46fc_8616_3d244ae35b2d.slice/crio-6f99aa15c5df5511d8859e56720e049e5851b7395722f1ba6892918fdc5775c7 WatchSource:0}: Error finding container 6f99aa15c5df5511d8859e56720e049e5851b7395722f1ba6892918fdc5775c7: Status 404 returned error can't find the container with id 6f99aa15c5df5511d8859e56720e049e5851b7395722f1ba6892918fdc5775c7 Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.054342 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45"] Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.060124 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq"] Mar 20 15:44:16 crc kubenswrapper[3552]: W0320 15:44:16.063327 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb808d486_c6e9_4167_92f4_7c854ead72f7.slice/crio-1631d8f95f15ab32bb078902bb60f572439dace4a1c45bcf877c3596e65ab5a7 WatchSource:0}: Error finding container 1631d8f95f15ab32bb078902bb60f572439dace4a1c45bcf877c3596e65ab5a7: Status 404 returned error can't find the container with id 1631d8f95f15ab32bb078902bb60f572439dace4a1c45bcf877c3596e65ab5a7 Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.065815 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw"] Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.071130 3552 kuberuntime_manager.go:1262] container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:ec36a9083657587022f8471c9d5a71b87a7895398496e7fc546c73aa1eae4b56,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tfrlf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-854bd9766b-t7rzd_openstack-operators(c64483e3-8ac2-4242-9a5e-85839f40cb42): ErrImagePull: pull QPS exceeded Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.071146 3552 kuberuntime_manager.go:1262] container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:425fd66675becbe0ca2b2fe1a5a6694ac6e0b1cdce9a77a7a37f99785eadc74a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zbttp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7598758bfb-fd8f7_openstack-operators(b808d486-c6e9-4167-92f4-7c854ead72f7): ErrImagePull: pull QPS exceeded Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.071180 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" podUID="c64483e3-8ac2-4242-9a5e-85839f40cb42" Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.071253 3552 kuberuntime_manager.go:1262] container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:bef93f71d3b42a72d8b96c69bdb4db4b8bd797c5093a0a719443d7a5c9aaab55,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vxsks,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-6dd5cfd685-sv9dk_openstack-operators(b1a25fa6-822d-4376-8721-bd7802437838): ErrImagePull: pull QPS exceeded Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.072096 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd"] Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.072223 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" podUID="b1a25fa6-822d-4376-8721-bd7802437838" Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.072248 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" podUID="b808d486-c6e9-4167-92f4-7c854ead72f7" Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 
Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.072724 3552 kuberuntime_manager.go:1262] container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:866844c5b88e1e0518ceb7490cac9d093da3fb8b2f27ba7bd9bd89f946b9ee6e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n6wb8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5fccf6868d-8zrxq_openstack-operators(9ad61c28-5162-4ceb-b703-79e0f4d20a43): ErrImagePull: pull QPS exceeded
Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.072772 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" podUID="9ad61c28-5162-4ceb-b703-79e0f4d20a43"
Mar 20 15:44:16 crc kubenswrapper[3552]: W0320 15:44:16.082489 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9502382_d33a_43df_bbfa_f4462e6c426a.slice/crio-da9cbd179507cb0eaf01c2c54e1caf81db9ec5cee2ee94457ed1ae41e4ecd60b WatchSource:0}: Error finding container da9cbd179507cb0eaf01c2c54e1caf81db9ec5cee2ee94457ed1ae41e4ecd60b: Status 404 returned error can't find the container with id da9cbd179507cb0eaf01c2c54e1caf81db9ec5cee2ee94457ed1ae41e4ecd60b
Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.082616 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk"]
Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.083738 3552
kuberuntime_manager.go:1262] container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:526f9d4965431e1a5e4f8c3224bcee3f636a3108a5e0767296a994c2a517404a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jls5w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-c9c4d5ccf-zqzbw_openstack-operators(a9eee008-53da-425f-bb6e-7a39e6b07754): ErrImagePull: pull QPS exceeded Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.083773 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" podUID="a9eee008-53da-425f-bb6e-7a39e6b07754" Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.088208 3552 kuberuntime_manager.go:1262] container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lmqwm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf_openstack-operators(b9502382-d33a-43df-bbfa-f4462e6c426a): ErrImagePull: pull QPS exceeded Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.088248 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" podUID="b9502382-d33a-43df-bbfa-f4462e6c426a" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.088642 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h"] Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.098753 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7"] Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.104851 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf"] Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.140042 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" event={"ID":"b1a25fa6-822d-4376-8721-bd7802437838","Type":"ContainerStarted","Data":"281024539b19359999677faa0beeb054ff774c38d40c754b0a0b8152d8bee460"} Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.142152 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:bef93f71d3b42a72d8b96c69bdb4db4b8bd797c5093a0a719443d7a5c9aaab55\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" podUID="b1a25fa6-822d-4376-8721-bd7802437838" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.143276 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" event={"ID":"ce6883bb-9cc0-494d-acc0-28ab4cb8ba66","Type":"ContainerStarted","Data":"83ba6974fe83dfda8a53dae55bde4a986e0418c9938e7c29445a80dd557d10b8"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.147698 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" event={"ID":"f4aee2da-fb97-4e64-a4d1-223cf14816e8","Type":"ContainerStarted","Data":"17a95b4ed5b589739ebc6f7d1cc59b1bbb5dae46859f54f6e6cbc88b36e58285"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.151505 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" event={"ID":"e288b597-0ccf-4abc-9f7a-634d63242553","Type":"ContainerStarted","Data":"d0397f30edd1c32e3824938a5c2bd733d1fc6b23f8b6c14eb157e6d347640874"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.160646 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" event={"ID":"f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1","Type":"ContainerStarted","Data":"0377b016209985b2f652aab072a21f91814a4ef38232fd3c46e0360c931fce00"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.167066 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" event={"ID":"cba9b971-9846-434b-be0f-c49675291fc6","Type":"ContainerStarted","Data":"8450f99d2fe7b63e8c94a7a21542dbffbd0cadb844db74b608bc1b0befe5512d"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.168023 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" event={"ID":"71432b9e-c028-469c-904e-9a74b0ffa5ec","Type":"ContainerStarted","Data":"a7c156b7689f4a7cafc1e293bdbdff2904b46c958a7b519227f8fa20d24f7c3a"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.170017 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" event={"ID":"951c8176-4fe8-41d9-8bea-c6bf299dec7c","Type":"ContainerStarted","Data":"385ba765d22108afc8864a82a8d76d0526017293b4f49b52620fa3b87ea7f1de"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.172662 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" event={"ID":"9ad61c28-5162-4ceb-b703-79e0f4d20a43","Type":"ContainerStarted","Data":"1ec68eb1f078cd63e003d551936a2ce52df310eb796895d6bb2e498dc76e7280"} Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.188271 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:866844c5b88e1e0518ceb7490cac9d093da3fb8b2f27ba7bd9bd89f946b9ee6e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" podUID="9ad61c28-5162-4ceb-b703-79e0f4d20a43" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 
15:44:16.190796 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" event={"ID":"b808d486-c6e9-4167-92f4-7c854ead72f7","Type":"ContainerStarted","Data":"1631d8f95f15ab32bb078902bb60f572439dace4a1c45bcf877c3596e65ab5a7"} Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.192675 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:425fd66675becbe0ca2b2fe1a5a6694ac6e0b1cdce9a77a7a37f99785eadc74a\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" podUID="b808d486-c6e9-4167-92f4-7c854ead72f7" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.193893 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" event={"ID":"b9502382-d33a-43df-bbfa-f4462e6c426a","Type":"ContainerStarted","Data":"da9cbd179507cb0eaf01c2c54e1caf81db9ec5cee2ee94457ed1ae41e4ecd60b"} Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.195823 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" podUID="b9502382-d33a-43df-bbfa-f4462e6c426a" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.196444 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" event={"ID":"105486bf-4db8-4e8b-b7e6-b36f272e8042","Type":"ContainerStarted","Data":"828689ada72963a88370e720e8a279366f5535c73f07ff62f3509395ba3ed586"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.198390 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" event={"ID":"354e53f6-1fa0-4b98-8a19-741caf032b5a","Type":"ContainerStarted","Data":"9aaca0126ace94ea9abe0f93cee2223ee2b0c89aeacd013f0c292be34fc5f628"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.209740 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" event={"ID":"b672dc41-4a65-47db-aeeb-d9858305445e","Type":"ContainerStarted","Data":"1ed5e52e673939642ef7a14fa48b4fa9fced8a34d1d354ef0c54d5298b38505b"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.215879 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" event={"ID":"4e980880-d1a1-40dc-9a05-394a72cfb983","Type":"ContainerStarted","Data":"c08f39003a2f7ec701bb48cc8181f93e1f0decdd68a9168b84ab494fa7bcb41b"} Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.252367 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" event={"ID":"c64483e3-8ac2-4242-9a5e-85839f40cb42","Type":"ContainerStarted","Data":"f2b8f35cb6263a3448eb4a9db651b9fca3ba512f1d9686196d902692fb3fc1c0"} Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.254733 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:ec36a9083657587022f8471c9d5a71b87a7895398496e7fc546c73aa1eae4b56\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" podUID="c64483e3-8ac2-4242-9a5e-85839f40cb42" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.255473 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" event={"ID":"a9eee008-53da-425f-bb6e-7a39e6b07754","Type":"ContainerStarted","Data":"d711b4f89e96c2e010620b6f2a97725223dcae33cd8263d2a11c6e81dec5148a"} Mar 20 15:44:16 crc kubenswrapper[3552]: E0320 15:44:16.256674 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:526f9d4965431e1a5e4f8c3224bcee3f636a3108a5e0767296a994c2a517404a\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" podUID="a9eee008-53da-425f-bb6e-7a39e6b07754" Mar 20 15:44:16 crc kubenswrapper[3552]: I0320 15:44:16.260703 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" event={"ID":"42323066-562e-46fc-8616-3d244ae35b2d","Type":"ContainerStarted","Data":"6f99aa15c5df5511d8859e56720e049e5851b7395722f1ba6892918fdc5775c7"} Mar 20 15:44:17 crc kubenswrapper[3552]: I0320 15:44:17.362290 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.362727 3552 secret.go:194] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.362773 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert podName:85fc9bb8-3e61-4a3f-bc9a-54c327b0b278 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:21.362759793 +0000 UTC m=+1161.056456623 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert") pod "infra-operator-controller-manager-b68b4cfdf-h8ljt" (UID: "85fc9bb8-3e61-4a3f-bc9a-54c327b0b278") : secret "infra-operator-webhook-server-cert" not found Mar 20 15:44:17 crc kubenswrapper[3552]: I0320 15:44:17.632350 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.632533 3552 secret.go:194] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.632579 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert podName:6620147f-34ac-4892-9c3c-7886a2bd6558 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:21.632564801 +0000 UTC m=+1161.326261631 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert") pod "openstack-baremetal-operator-controller-manager-6557559db7m9cdd" (UID: "6620147f-34ac-4892-9c3c-7886a2bd6558") : secret "openstack-baremetal-operator-webhook-server-cert" not found Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.935089 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" podUID="b9502382-d33a-43df-bbfa-f4462e6c426a" Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.943755 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:ec36a9083657587022f8471c9d5a71b87a7895398496e7fc546c73aa1eae4b56\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" podUID="c64483e3-8ac2-4242-9a5e-85839f40cb42" Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.943838 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:bef93f71d3b42a72d8b96c69bdb4db4b8bd797c5093a0a719443d7a5c9aaab55\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" podUID="b1a25fa6-822d-4376-8721-bd7802437838" Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.943882 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:526f9d4965431e1a5e4f8c3224bcee3f636a3108a5e0767296a994c2a517404a\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" podUID="a9eee008-53da-425f-bb6e-7a39e6b07754" 
Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.943921 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:866844c5b88e1e0518ceb7490cac9d093da3fb8b2f27ba7bd9bd89f946b9ee6e\\\"\"" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" podUID="9ad61c28-5162-4ceb-b703-79e0f4d20a43" Mar 20 15:44:17 crc kubenswrapper[3552]: E0320 15:44:17.944008 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:425fd66675becbe0ca2b2fe1a5a6694ac6e0b1cdce9a77a7a37f99785eadc74a\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" podUID="b808d486-c6e9-4167-92f4-7c854ead72f7" Mar 20 15:44:18 crc kubenswrapper[3552]: I0320 15:44:18.081942 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:18 crc kubenswrapper[3552]: I0320 15:44:18.082283 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:18 crc kubenswrapper[3552]: E0320 15:44:18.084392 3552 secret.go:194] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Mar 20 15:44:18 crc kubenswrapper[3552]: E0320 15:44:18.084648 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:22.084627153 +0000 UTC m=+1161.778323993 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "webhook-server-cert" not found Mar 20 15:44:18 crc kubenswrapper[3552]: E0320 15:44:18.085173 3552 secret.go:194] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Mar 20 15:44:18 crc kubenswrapper[3552]: E0320 15:44:18.085329 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:22.085316801 +0000 UTC m=+1161.779013631 (durationBeforeRetry 4s). 
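Once a pull has failed, subsequent sync attempts for the same image fail fast with ImagePullBackOff until a per-image backoff window expires, which is why the same pods repeat above at 15:44:17 without any new pull traffic. A sketch of that bookkeeping, assuming client-go's flowcontrol.Backoff with illustrative 10s/300s bounds (assumed values, not taken from this log):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Assumed bounds for illustration: 10s initial delay, 300s cap.
	backoff := flowcontrol.NewBackOff(10*time.Second, 300*time.Second)
	image := "quay.io/openstack-k8s-operators/neutron-operator"

	for attempt := 1; attempt <= 4; attempt++ {
		now := time.Now()
		if backoff.IsInBackOffSinceUpdate(image, now) {
			// Reported in the log above as: Back-off pulling image "...".
			fmt.Printf("attempt %d: ImagePullBackOff, window %s\n", attempt, backoff.Get(image))
			continue
		}
		// Record a failed pull; the window doubles up to the cap.
		backoff.Next(image, now)
		fmt.Printf("attempt %d: pull tried, next window %s\n", attempt, backoff.Get(image))
	}
}
```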
Mar 20 15:44:21 crc kubenswrapper[3552]: I0320 15:44:21.429137 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt"
Mar 20 15:44:21 crc kubenswrapper[3552]: E0320 15:44:21.429370 3552 secret.go:194] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Mar 20 15:44:21 crc kubenswrapper[3552]: E0320 15:44:21.429813 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert podName:85fc9bb8-3e61-4a3f-bc9a-54c327b0b278 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:29.429792018 +0000 UTC m=+1169.123488848 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert") pod "infra-operator-controller-manager-b68b4cfdf-h8ljt" (UID: "85fc9bb8-3e61-4a3f-bc9a-54c327b0b278") : secret "infra-operator-webhook-server-cert" not found
Mar 20 15:44:21 crc kubenswrapper[3552]: I0320 15:44:21.733001 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd"
Mar 20 15:44:21 crc kubenswrapper[3552]: E0320 15:44:21.733214 3552 secret.go:194] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Mar 20 15:44:21 crc kubenswrapper[3552]: E0320 15:44:21.733306 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert podName:6620147f-34ac-4892-9c3c-7886a2bd6558 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:29.733283425 +0000 UTC m=+1169.426980325 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert") pod "openstack-baremetal-operator-controller-manager-6557559db7m9cdd" (UID: "6620147f-34ac-4892-9c3c-7886a2bd6558") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Mar 20 15:44:22 crc kubenswrapper[3552]: I0320 15:44:22.138454 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"
Mar 20 15:44:22 crc kubenswrapper[3552]: I0320 15:44:22.138514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"
Mar 20 15:44:22 crc kubenswrapper[3552]: E0320 15:44:22.138690 3552 secret.go:194] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Mar 20 15:44:22 crc kubenswrapper[3552]: E0320 15:44:22.138748 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:30.138730641 +0000 UTC m=+1169.832427471 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "metrics-server-cert" not found
Mar 20 15:44:22 crc kubenswrapper[3552]: E0320 15:44:22.139134 3552 secret.go:194] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Mar 20 15:44:22 crc kubenswrapper[3552]: E0320 15:44:22.139174 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:30.139162743 +0000 UTC m=+1169.832859573 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "webhook-server-cert" not found
Mar 20 15:44:29 crc kubenswrapper[3552]: I0320 15:44:29.429947 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt"
Mar 20 15:44:29 crc kubenswrapper[3552]: E0320 15:44:29.430259 3552 secret.go:194] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Mar 20 15:44:29 crc kubenswrapper[3552]: E0320 15:44:29.430781 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert podName:85fc9bb8-3e61-4a3f-bc9a-54c327b0b278 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:45.430761216 +0000 UTC m=+1185.124458046 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert") pod "infra-operator-controller-manager-b68b4cfdf-h8ljt" (UID: "85fc9bb8-3e61-4a3f-bc9a-54c327b0b278") : secret "infra-operator-webhook-server-cert" not found
Mar 20 15:44:29 crc kubenswrapper[3552]: I0320 15:44:29.734217 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd"
Mar 20 15:44:29 crc kubenswrapper[3552]: E0320 15:44:29.734431 3552 secret.go:194] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Mar 20 15:44:29 crc kubenswrapper[3552]: E0320 15:44:29.734684 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert podName:6620147f-34ac-4892-9c3c-7886a2bd6558 nodeName:}" failed. No retries permitted until 2026-03-20 15:44:45.734664426 +0000 UTC m=+1185.428361256 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert") pod "openstack-baremetal-operator-controller-manager-6557559db7m9cdd" (UID: "6620147f-34ac-4892-9c3c-7886a2bd6558") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Mar 20 15:44:30 crc kubenswrapper[3552]: I0320 15:44:30.140231 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"
Mar 20 15:44:30 crc kubenswrapper[3552]: I0320 15:44:30.140329 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"
Mar 20 15:44:30 crc kubenswrapper[3552]: E0320 15:44:30.140394 3552 secret.go:194] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Mar 20 15:44:30 crc kubenswrapper[3552]: E0320 15:44:30.140534 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:46.140511112 +0000 UTC m=+1185.834208032 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "webhook-server-cert" not found
Mar 20 15:44:30 crc kubenswrapper[3552]: E0320 15:44:30.140532 3552 secret.go:194] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Mar 20 15:44:30 crc kubenswrapper[3552]: E0320 15:44:30.140627 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs podName:145a49ae-4a3b-4096-8fdb-974a93c8194a nodeName:}" failed. No retries permitted until 2026-03-20 15:44:46.140608615 +0000 UTC m=+1185.834305445 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs") pod "openstack-operator-controller-manager-597d5dd64-rmm62" (UID: "145a49ae-4a3b-4096-8fdb-974a93c8194a") : secret "metrics-server-cert" not found
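The durationBeforeRetry values in the mount failures above double on each round (4s at 15:44:17-18, 8s at 15:44:21-22, 16s at 15:44:29-30), the signature of exponential backoff in the volume manager's retry loop. A minimal sketch of that schedule; the 4s starting point and the cap are illustrative assumptions, the doubling is what the log shows:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed starting delay and cap; only the doubling is observed above.
	delay := 4 * time.Second
	maxDelay := 2 * time.Minute

	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("attempt %d: durationBeforeRetry %s\n", attempt, delay)
		delay *= 2 // double after each failure
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```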
Mar 20 15:44:33 crc kubenswrapper[3552]: I0320 15:44:33.065115 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" event={"ID":"951c8176-4fe8-41d9-8bea-c6bf299dec7c","Type":"ContainerStarted","Data":"5fcba3298de932742c3e942b08b6bbb3b6c0407d217b30367347dcdb3d780ab0"}
Mar 20 15:44:33 crc kubenswrapper[3552]: I0320 15:44:33.065924 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf"
Mar 20 15:44:33 crc kubenswrapper[3552]: I0320 15:44:33.077739 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" event={"ID":"4ecd96e3-ccef-44d8-b099-803a81f0d35d","Type":"ContainerStarted","Data":"e2c1462ffd3d6d5258b6de280d8e64d16202d4e677156a9e49695e39a52d66ca"}
Mar 20 15:44:33 crc kubenswrapper[3552]: I0320 15:44:33.078066 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb"
Mar 20 15:44:33 crc kubenswrapper[3552]: I0320 15:44:33.087240 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" podStartSLOduration=10.578753453000001 podStartE2EDuration="20.087188047s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.646523823 +0000 UTC m=+1155.340220653" lastFinishedPulling="2026-03-20 15:44:25.154958417 +0000 UTC m=+1164.848655247" observedRunningTime="2026-03-20 15:44:33.081509765 +0000 UTC m=+1172.775206605" watchObservedRunningTime="2026-03-20 15:44:33.087188047 +0000 UTC m=+1172.780884897"
Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.095735 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" event={"ID":"71432b9e-c028-469c-904e-9a74b0ffa5ec","Type":"ContainerStarted","Data":"3dbc33f6b2f05bc319db364a89107fb5b03afedbbd1a64d5474a668f601f117e"}
Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.096157 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg"
Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.119624 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" event={"ID":"f4aee2da-fb97-4e64-a4d1-223cf14816e8","Type":"ContainerStarted","Data":"9d40653932c5de75008cd4f58c8825cc6563f0ad32bd59987a0480a5f30af0bf"}
Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.120476 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45"
Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.120727 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" podStartSLOduration=12.389475369 podStartE2EDuration="21.120679603s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:14.756549675 +0000 UTC m=+1154.450246505" lastFinishedPulling="2026-03-20 15:44:23.487753909 +0000 UTC m=+1163.181450739" observedRunningTime="2026-03-20 15:44:33.127084204 +0000 UTC m=+1172.820781034" watchObservedRunningTime="2026-03-20 15:44:34.120679603 +0000 UTC m=+1173.814376433"
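The manila-operator startup entry above is internally consistent: podStartE2EDuration is the gap from podCreationTimestamp (15:44:13) to the observed running time (15:44:33.087188047), and podStartSLOduration subtracts the image-pull window (firstStartedPulling to lastFinishedPulling). A worked check of those numbers, copied from the log:

```go
package main

import "fmt"

func main() {
	// Seconds past 15:44, copied from the manila-operator entry above.
	created := 13.0           // podCreationTimestamp 15:44:13
	running := 33.087188047   // watchObservedRunningTime
	pullStart := 15.646523823 // firstStartedPulling
	pullEnd := 25.154958417   // lastFinishedPulling

	e2e := running - created           // ~20.087188047s = podStartE2EDuration
	slo := e2e - (pullEnd - pullStart) // ~10.578753453s = podStartSLOduration
	fmt.Printf("podStartE2EDuration ~ %.9fs\n", e2e)
	fmt.Printf("podStartSLOduration ~ %.9fs\n", slo)
}
```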
firstStartedPulling="2026-03-20 15:44:14.756549675 +0000 UTC m=+1154.450246505" lastFinishedPulling="2026-03-20 15:44:23.487753909 +0000 UTC m=+1163.181450739" observedRunningTime="2026-03-20 15:44:33.127084204 +0000 UTC m=+1172.820781034" watchObservedRunningTime="2026-03-20 15:44:34.120679603 +0000 UTC m=+1173.814376433" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.120876 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" podStartSLOduration=4.160259276 podStartE2EDuration="21.120860268s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.629716533 +0000 UTC m=+1155.323413363" lastFinishedPulling="2026-03-20 15:44:32.590317525 +0000 UTC m=+1172.284014355" observedRunningTime="2026-03-20 15:44:34.115527625 +0000 UTC m=+1173.809224465" watchObservedRunningTime="2026-03-20 15:44:34.120860268 +0000 UTC m=+1173.814557098" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.130954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" event={"ID":"e288b597-0ccf-4abc-9f7a-634d63242553","Type":"ContainerStarted","Data":"8f12e0830b5b6e8e05d3588beb0590ae1943c2bbe742108291ce29e5d290e076"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.148296 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" podStartSLOduration=4.643745159 podStartE2EDuration="21.14823962s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.037694187 +0000 UTC m=+1155.731391017" lastFinishedPulling="2026-03-20 15:44:32.542188648 +0000 UTC m=+1172.235885478" observedRunningTime="2026-03-20 15:44:34.147632864 +0000 UTC m=+1173.841329704" watchObservedRunningTime="2026-03-20 15:44:34.14823962 +0000 UTC m=+1173.841936450" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.151306 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" event={"ID":"f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1","Type":"ContainerStarted","Data":"bc5c5d62f4052f9c9fa0b1e8e60458e1f431622773045f728ff147559b0b9ab1"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.157805 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" event={"ID":"cba9b971-9846-434b-be0f-c49675291fc6","Type":"ContainerStarted","Data":"07d8fe3296e03d5f6d8a064014dd0960960c3b1fe9d7594fd4677376f0c3b653"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.169487 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" event={"ID":"42323066-562e-46fc-8616-3d244ae35b2d","Type":"ContainerStarted","Data":"ba960f4b68049c43d36ee501261eb1ac81527edbed2dfdbe0c288ed5757129ed"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.170572 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.195854 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" 
event={"ID":"ce6883bb-9cc0-494d-acc0-28ab4cb8ba66","Type":"ContainerStarted","Data":"96865a0449b5089c5917ccd0b835e0a44d186d1a3f67f7362e742d49bf61e401"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.197207 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.204238 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" event={"ID":"4ad6253a-a3ac-4428-a1e0-4059ca5b02f5","Type":"ContainerStarted","Data":"00e45beecd6cd133c2172c2ceeaf3503328ad013531c39e54139bbfb3d38bc81"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.205187 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.221388 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" event={"ID":"105486bf-4db8-4e8b-b7e6-b36f272e8042","Type":"ContainerStarted","Data":"2e8cb5f6ddf03643f1681509434465bda9f920cc5d15f13e8fe9720e82e696da"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.222009 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.241613 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" podStartSLOduration=4.274454431 podStartE2EDuration="21.241557557s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.640109061 +0000 UTC m=+1155.333805891" lastFinishedPulling="2026-03-20 15:44:32.607212177 +0000 UTC m=+1172.300909017" observedRunningTime="2026-03-20 15:44:34.234800456 +0000 UTC m=+1173.928497296" watchObservedRunningTime="2026-03-20 15:44:34.241557557 +0000 UTC m=+1173.935254397" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.243111 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" podStartSLOduration=4.270158715 podStartE2EDuration="21.243079887s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.617350132 +0000 UTC m=+1155.311046962" lastFinishedPulling="2026-03-20 15:44:32.590271294 +0000 UTC m=+1172.283968134" observedRunningTime="2026-03-20 15:44:34.199660646 +0000 UTC m=+1173.893357486" watchObservedRunningTime="2026-03-20 15:44:34.243079887 +0000 UTC m=+1173.936776717" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.244800 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" event={"ID":"354e53f6-1fa0-4b98-8a19-741caf032b5a","Type":"ContainerStarted","Data":"d7a28a66a04ce1030676b663bbfd116ea745074d5c92fc984c108b92f3b5045d"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.245713 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.247541 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" 
event={"ID":"b672dc41-4a65-47db-aeeb-d9858305445e","Type":"ContainerStarted","Data":"ffcc5e27df670f1f240d849cd9f8c5b261e85430036a7c290715d16069b1fdd3"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.247908 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.266363 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" podStartSLOduration=4.722607629 podStartE2EDuration="21.266320799s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.063511417 +0000 UTC m=+1155.757208247" lastFinishedPulling="2026-03-20 15:44:32.607224577 +0000 UTC m=+1172.300921417" observedRunningTime="2026-03-20 15:44:34.265482067 +0000 UTC m=+1173.959178907" watchObservedRunningTime="2026-03-20 15:44:34.266320799 +0000 UTC m=+1173.960017639" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.285864 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" event={"ID":"4e980880-d1a1-40dc-9a05-394a72cfb983","Type":"ContainerStarted","Data":"2bbfcc770c75183039c5acb4af12f23a85fc7b2bb32d6d947a15befc80fb33d4"} Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.285918 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.305065 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" podStartSLOduration=4.555886719 podStartE2EDuration="21.304905491s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.641704094 +0000 UTC m=+1155.335400924" lastFinishedPulling="2026-03-20 15:44:32.390722866 +0000 UTC m=+1172.084419696" observedRunningTime="2026-03-20 15:44:34.30450014 +0000 UTC m=+1173.998196980" watchObservedRunningTime="2026-03-20 15:44:34.304905491 +0000 UTC m=+1173.998602321" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.327984 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" podStartSLOduration=4.443257097 podStartE2EDuration="21.327945008s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.649098692 +0000 UTC m=+1155.342795522" lastFinishedPulling="2026-03-20 15:44:32.533786603 +0000 UTC m=+1172.227483433" observedRunningTime="2026-03-20 15:44:34.32577922 +0000 UTC m=+1174.019476060" watchObservedRunningTime="2026-03-20 15:44:34.327945008 +0000 UTC m=+1174.021641838" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.367975 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" podStartSLOduration=4.425710077 podStartE2EDuration="21.367915237s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.648078784 +0000 UTC m=+1155.341775614" lastFinishedPulling="2026-03-20 15:44:32.590283934 +0000 UTC m=+1172.283980774" observedRunningTime="2026-03-20 15:44:34.361205707 +0000 UTC m=+1174.054902557" watchObservedRunningTime="2026-03-20 15:44:34.367915237 +0000 UTC 
m=+1174.061612067" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.385989 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" podStartSLOduration=4.459285395 podStartE2EDuration="21.385925359s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.616117949 +0000 UTC m=+1155.309814779" lastFinishedPulling="2026-03-20 15:44:32.542757893 +0000 UTC m=+1172.236454743" observedRunningTime="2026-03-20 15:44:34.379833226 +0000 UTC m=+1174.073530056" watchObservedRunningTime="2026-03-20 15:44:34.385925359 +0000 UTC m=+1174.079622189" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.415672 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" podStartSLOduration=4.486284647 podStartE2EDuration="21.415608143s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.613505599 +0000 UTC m=+1155.307202429" lastFinishedPulling="2026-03-20 15:44:32.542829075 +0000 UTC m=+1172.236525925" observedRunningTime="2026-03-20 15:44:34.411714288 +0000 UTC m=+1174.105411128" watchObservedRunningTime="2026-03-20 15:44:34.415608143 +0000 UTC m=+1174.109304983" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.468023 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" podStartSLOduration=4.888674491 podStartE2EDuration="21.467979164s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:15.096863129 +0000 UTC m=+1154.790559959" lastFinishedPulling="2026-03-20 15:44:31.676167802 +0000 UTC m=+1171.369864632" observedRunningTime="2026-03-20 15:44:34.448008519 +0000 UTC m=+1174.141705369" watchObservedRunningTime="2026-03-20 15:44:34.467979164 +0000 UTC m=+1174.161675994" Mar 20 15:44:34 crc kubenswrapper[3552]: I0320 15:44:34.470199 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" podStartSLOduration=4.908481662 podStartE2EDuration="21.470167212s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.028637515 +0000 UTC m=+1155.722334345" lastFinishedPulling="2026-03-20 15:44:32.590323065 +0000 UTC m=+1172.284019895" observedRunningTime="2026-03-20 15:44:34.468715193 +0000 UTC m=+1174.162412033" watchObservedRunningTime="2026-03-20 15:44:34.470167212 +0000 UTC m=+1174.163864042" Mar 20 15:44:35 crc kubenswrapper[3552]: I0320 15:44:35.289972 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:35 crc kubenswrapper[3552]: I0320 15:44:35.290255 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:35 crc kubenswrapper[3552]: I0320 15:44:35.290809 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:43 crc kubenswrapper[3552]: I0320 15:44:43.675287 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-674dd8bdd9-pvjxb" Mar 20 15:44:43 crc kubenswrapper[3552]: I0320 
15:44:43.676183 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-5666895465-zvh67" Mar 20 15:44:43 crc kubenswrapper[3552]: I0320 15:44:43.699388 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-85887bf46b-ddjrz" Mar 20 15:44:43 crc kubenswrapper[3552]: I0320 15:44:43.711179 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-f8f7f758d-r5lmz" Mar 20 15:44:43 crc kubenswrapper[3552]: I0320 15:44:43.754422 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-dcb9f85b6-xjgbk" Mar 20 15:44:43 crc kubenswrapper[3552]: I0320 15:44:43.997660 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-6f77bd5775-9s4lh" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.085660 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6ff8887465-j5znl" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.111851 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-5475cc9b67-clc46" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.152030 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-75d675bfc4-mlclf" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.210533 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-666c4df8d6-k4pgg" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.233781 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-c9f654fc4-lxs7j" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.268820 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5dfddf8d94-vwkxd" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.391793 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-665dbccdfd-t2b8h" Mar 20 15:44:44 crc kubenswrapper[3552]: I0320 15:44:44.620189 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6dc7cb7d75-5cl45" Mar 20 15:44:45 crc kubenswrapper[3552]: I0320 15:44:45.506006 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:45 crc kubenswrapper[3552]: I0320 15:44:45.548057 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85fc9bb8-3e61-4a3f-bc9a-54c327b0b278-cert\") pod \"infra-operator-controller-manager-b68b4cfdf-h8ljt\" (UID: \"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278\") " 
pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:45 crc kubenswrapper[3552]: I0320 15:44:45.810218 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:45 crc kubenswrapper[3552]: I0320 15:44:45.813808 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:44:45 crc kubenswrapper[3552]: I0320 15:44:45.814282 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6620147f-34ac-4892-9c3c-7886a2bd6558-cert\") pod \"openstack-baremetal-operator-controller-manager-6557559db7m9cdd\" (UID: \"6620147f-34ac-4892-9c3c-7886a2bd6558\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:46 crc kubenswrapper[3552]: I0320 15:44:46.724519 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:46 crc kubenswrapper[3552]: I0320 15:44:46.724637 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:44:46 crc kubenswrapper[3552]: I0320 15:44:46.725040 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:46 crc kubenswrapper[3552]: I0320 15:44:46.731575 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-webhook-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:46 crc kubenswrapper[3552]: I0320 15:44:46.749946 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/145a49ae-4a3b-4096-8fdb-974a93c8194a-metrics-certs\") pod \"openstack-operator-controller-manager-597d5dd64-rmm62\" (UID: \"145a49ae-4a3b-4096-8fdb-974a93c8194a\") " pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:46 crc kubenswrapper[3552]: I0320 15:44:46.800044 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.484156 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.490647 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt"] Mar 20 15:44:52 crc kubenswrapper[3552]: W0320 15:44:52.559262 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6620147f_34ac_4892_9c3c_7886a2bd6558.slice/crio-78d8d960b7781ba09cc5a11dfc978350814ccc531dcfd7405d2e9897fdff54f2 WatchSource:0}: Error finding container 78d8d960b7781ba09cc5a11dfc978350814ccc531dcfd7405d2e9897fdff54f2: Status 404 returned error can't find the container with id 78d8d960b7781ba09cc5a11dfc978350814ccc531dcfd7405d2e9897fdff54f2 Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.559865 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd"] Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.672782 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"] Mar 20 15:44:52 crc kubenswrapper[3552]: W0320 15:44:52.684940 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod145a49ae_4a3b_4096_8fdb_974a93c8194a.slice/crio-76657c0727c49e11d54414e3133afd421e1f4d43b036d53b30e08ee5dad280dd WatchSource:0}: Error finding container 76657c0727c49e11d54414e3133afd421e1f4d43b036d53b30e08ee5dad280dd: Status 404 returned error can't find the container with id 76657c0727c49e11d54414e3133afd421e1f4d43b036d53b30e08ee5dad280dd Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.772141 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" event={"ID":"c64483e3-8ac2-4242-9a5e-85839f40cb42","Type":"ContainerStarted","Data":"eeb63a38510dbc1448f7a1c8bc5b2921884c2905c623a32e31be27aed60509e0"} Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.773354 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" event={"ID":"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278","Type":"ContainerStarted","Data":"525ba6af53f92256c38ae41014dff85b433c3230a0c10a472c8d53f04df59898"} Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.774633 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" event={"ID":"a9eee008-53da-425f-bb6e-7a39e6b07754","Type":"ContainerStarted","Data":"7b3827f60c2421a4a638b1e7b91be8d19a49a7950bab25eb88f7cc6294a1d158"} Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.774828 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.776750 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" event={"ID":"b808d486-c6e9-4167-92f4-7c854ead72f7","Type":"ContainerStarted","Data":"d8ac9c7c900c4105ab64fc1a3c421dee1ffb7696955e307090142038816de3df"} 
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.778786 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" event={"ID":"b1a25fa6-822d-4376-8721-bd7802437838","Type":"ContainerStarted","Data":"27841da2ee57117547c8fd2e54c702b62fe46a38f472359829af4650f2d441b9"}
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.778962 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk"
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.780643 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" event={"ID":"b9502382-d33a-43df-bbfa-f4462e6c426a","Type":"ContainerStarted","Data":"3ea416740d81b2d03cdf6b34cbe3b08828a360c2b6759424324c6d943ee257cc"}
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.781818 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" event={"ID":"6620147f-34ac-4892-9c3c-7886a2bd6558","Type":"ContainerStarted","Data":"78d8d960b7781ba09cc5a11dfc978350814ccc531dcfd7405d2e9897fdff54f2"}
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.783187 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" event={"ID":"9ad61c28-5162-4ceb-b703-79e0f4d20a43","Type":"ContainerStarted","Data":"2c25b0689b32a2c68b2697df2b77cff0b11862539ae0813c687c19bb45c7acd4"}
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.784459 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" event={"ID":"145a49ae-4a3b-4096-8fdb-974a93c8194a","Type":"ContainerStarted","Data":"76657c0727c49e11d54414e3133afd421e1f4d43b036d53b30e08ee5dad280dd"}
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.828644 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" podStartSLOduration=4.090660885 podStartE2EDuration="39.828603397s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.071008878 +0000 UTC m=+1155.764705708" lastFinishedPulling="2026-03-20 15:44:51.80895139 +0000 UTC m=+1191.502648220" observedRunningTime="2026-03-20 15:44:52.827136377 +0000 UTC m=+1192.520833207" watchObservedRunningTime="2026-03-20 15:44:52.828603397 +0000 UTC m=+1192.522300227"
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.895065 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" podStartSLOduration=4.169418301 podStartE2EDuration="39.895022473s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.072604851 +0000 UTC m=+1155.766301671" lastFinishedPulling="2026-03-20 15:44:51.798209003 +0000 UTC m=+1191.491905843" observedRunningTime="2026-03-20 15:44:52.865850283 +0000 UTC m=+1192.559547113" watchObservedRunningTime="2026-03-20 15:44:52.895022473 +0000 UTC m=+1192.588719303"
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.925124 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" podStartSLOduration=4.190940187 podStartE2EDuration="39.925061177s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.083634786 +0000 UTC m=+1155.777331616" lastFinishedPulling="2026-03-20 15:44:51.817755776 +0000 UTC m=+1191.511452606" observedRunningTime="2026-03-20 15:44:52.921986905 +0000 UTC m=+1192.615683745" watchObservedRunningTime="2026-03-20 15:44:52.925061177 +0000 UTC m=+1192.618758007"
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.929258 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" podStartSLOduration=4.18244284 podStartE2EDuration="39.929230369s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.070962427 +0000 UTC m=+1155.764659267" lastFinishedPulling="2026-03-20 15:44:51.817749966 +0000 UTC m=+1191.511446796" observedRunningTime="2026-03-20 15:44:52.892509696 +0000 UTC m=+1192.586206546" watchObservedRunningTime="2026-03-20 15:44:52.929230369 +0000 UTC m=+1192.622927199"
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.961137 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf" podStartSLOduration=3.128685161 podStartE2EDuration="38.961090771s" podCreationTimestamp="2026-03-20 15:44:14 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.088111275 +0000 UTC m=+1155.781808105" lastFinishedPulling="2026-03-20 15:44:51.920516885 +0000 UTC m=+1191.614213715" observedRunningTime="2026-03-20 15:44:52.948753761 +0000 UTC m=+1192.642450601" watchObservedRunningTime="2026-03-20 15:44:52.961090771 +0000 UTC m=+1192.654787601"
Mar 20 15:44:52 crc kubenswrapper[3552]: I0320 15:44:52.994471 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" podStartSLOduration=4.267182637 podStartE2EDuration="39.994430143s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:16.070969337 +0000 UTC m=+1155.764666167" lastFinishedPulling="2026-03-20 15:44:51.798216843 +0000 UTC m=+1191.491913673" observedRunningTime="2026-03-20 15:44:52.993086257 +0000 UTC m=+1192.686783087" watchObservedRunningTime="2026-03-20 15:44:52.994430143 +0000 UTC m=+1192.688126973"
Mar 20 15:44:54 crc kubenswrapper[3552]: I0320 15:44:54.139235 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd"
Mar 20 15:44:54 crc kubenswrapper[3552]: I0320 15:44:54.286760 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq"
Mar 20 15:44:54 crc kubenswrapper[3552]: I0320 15:44:54.443783 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7"
Mar 20 15:44:54 crc kubenswrapper[3552]: I0320 15:44:54.799332 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" event={"ID":"145a49ae-4a3b-4096-8fdb-974a93c8194a","Type":"ContainerStarted","Data":"739fefd405b5d23f85dc31c2a7ad602e315f8e5334c6da5dd9e4191f2f2d06c6"}
Mar 20 15:44:56 crc kubenswrapper[3552]: I0320 15:44:56.810583 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62"
Mar 20 15:44:56 crc kubenswrapper[3552]: I0320 15:44:56.840905 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" podStartSLOduration=42.840869807 podStartE2EDuration="42.840869807s" podCreationTimestamp="2026-03-20 15:44:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:44:56.835598866 +0000 UTC m=+1196.529295716" watchObservedRunningTime="2026-03-20 15:44:56.840869807 +0000 UTC m=+1196.534566637"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.155008 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"]
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.155795 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9c56d4d5-04a0-49da-8353-73a78e0775ef" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29567025-9vx52"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.157125 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.159126 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.159856 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.167865 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"]
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.324970 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6g7r\" (UniqueName: \"kubernetes.io/projected/9c56d4d5-04a0-49da-8353-73a78e0775ef-kube-api-access-s6g7r\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.325032 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c56d4d5-04a0-49da-8353-73a78e0775ef-secret-volume\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.325064 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c56d4d5-04a0-49da-8353-73a78e0775ef-config-volume\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"
Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.426285 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c56d4d5-04a0-49da-8353-73a78e0775ef-config-volume\") pod
\"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.426877 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-s6g7r\" (UniqueName: \"kubernetes.io/projected/9c56d4d5-04a0-49da-8353-73a78e0775ef-kube-api-access-s6g7r\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.427120 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c56d4d5-04a0-49da-8353-73a78e0775ef-secret-volume\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.427364 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c56d4d5-04a0-49da-8353-73a78e0775ef-config-volume\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.460428 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6g7r\" (UniqueName: \"kubernetes.io/projected/9c56d4d5-04a0-49da-8353-73a78e0775ef-kube-api-access-s6g7r\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.467846 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c56d4d5-04a0-49da-8353-73a78e0775ef-secret-volume\") pod \"collect-profiles-29567025-9vx52\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.477677 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.770027 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"] Mar 20 15:45:00 crc kubenswrapper[3552]: I0320 15:45:00.836803 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" event={"ID":"9c56d4d5-04a0-49da-8353-73a78e0775ef","Type":"ContainerStarted","Data":"305d3369502a5ff81b855d8205216b10638e1b6115a9c71bf5df28b7e5f98835"} Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.300908 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.301283 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.301321 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.301377 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.301419 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.842920 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" event={"ID":"85fc9bb8-3e61-4a3f-bc9a-54c327b0b278","Type":"ContainerStarted","Data":"ecabaf99701c00cd05df131284b58caf860dd2a772f5d7102ffa97232af09a74"} Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.844274 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.846455 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" event={"ID":"6620147f-34ac-4892-9c3c-7886a2bd6558","Type":"ContainerStarted","Data":"ce77d73f54175c29d1be38c569ad6b89487ed4e4fd31939e9a7288aeb437ee3c"} Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.848302 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.850425 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" event={"ID":"9c56d4d5-04a0-49da-8353-73a78e0775ef","Type":"ContainerStarted","Data":"a31837bebbee7da5d08b021f0e6c8e73fb7abf5edb9f3b9789c246773818cfb7"} Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.890963 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" podStartSLOduration=42.046307191 podStartE2EDuration="48.890922517s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:52.56407637 +0000 UTC m=+1192.257773200" lastFinishedPulling="2026-03-20 15:44:59.408691696 +0000 UTC m=+1199.102388526" 
observedRunningTime="2026-03-20 15:45:01.890205857 +0000 UTC m=+1201.583902697" watchObservedRunningTime="2026-03-20 15:45:01.890922517 +0000 UTC m=+1201.584619337" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.891320 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" podStartSLOduration=41.966448555 podStartE2EDuration="48.891303797s" podCreationTimestamp="2026-03-20 15:44:13 +0000 UTC" firstStartedPulling="2026-03-20 15:44:52.483614298 +0000 UTC m=+1192.177311118" lastFinishedPulling="2026-03-20 15:44:59.40846953 +0000 UTC m=+1199.102166360" observedRunningTime="2026-03-20 15:45:01.863966596 +0000 UTC m=+1201.557663426" watchObservedRunningTime="2026-03-20 15:45:01.891303797 +0000 UTC m=+1201.585000627" Mar 20 15:45:01 crc kubenswrapper[3552]: I0320 15:45:01.911006 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" podStartSLOduration=1.910940552 podStartE2EDuration="1.910940552s" podCreationTimestamp="2026-03-20 15:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:45:01.907148791 +0000 UTC m=+1201.600845651" watchObservedRunningTime="2026-03-20 15:45:01.910940552 +0000 UTC m=+1201.604637392" Mar 20 15:45:02 crc kubenswrapper[3552]: I0320 15:45:02.857106 3552 generic.go:334] "Generic (PLEG): container finished" podID="9c56d4d5-04a0-49da-8353-73a78e0775ef" containerID="a31837bebbee7da5d08b021f0e6c8e73fb7abf5edb9f3b9789c246773818cfb7" exitCode=0 Mar 20 15:45:02 crc kubenswrapper[3552]: I0320 15:45:02.857501 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" event={"ID":"9c56d4d5-04a0-49da-8353-73a78e0775ef","Type":"ContainerDied","Data":"a31837bebbee7da5d08b021f0e6c8e73fb7abf5edb9f3b9789c246773818cfb7"} Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.141324 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-854bd9766b-t7rzd" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.151531 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.212713 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6dd5cfd685-sv9dk" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.288984 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5fccf6868d-8zrxq" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.310816 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6g7r\" (UniqueName: \"kubernetes.io/projected/9c56d4d5-04a0-49da-8353-73a78e0775ef-kube-api-access-s6g7r\") pod \"9c56d4d5-04a0-49da-8353-73a78e0775ef\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.310893 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c56d4d5-04a0-49da-8353-73a78e0775ef-config-volume\") pod \"9c56d4d5-04a0-49da-8353-73a78e0775ef\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.310930 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c56d4d5-04a0-49da-8353-73a78e0775ef-secret-volume\") pod \"9c56d4d5-04a0-49da-8353-73a78e0775ef\" (UID: \"9c56d4d5-04a0-49da-8353-73a78e0775ef\") " Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.311634 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c56d4d5-04a0-49da-8353-73a78e0775ef-config-volume" (OuterVolumeSpecName: "config-volume") pod "9c56d4d5-04a0-49da-8353-73a78e0775ef" (UID: "9c56d4d5-04a0-49da-8353-73a78e0775ef"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.320527 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c56d4d5-04a0-49da-8353-73a78e0775ef-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9c56d4d5-04a0-49da-8353-73a78e0775ef" (UID: "9c56d4d5-04a0-49da-8353-73a78e0775ef"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.321156 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-c9c4d5ccf-zqzbw" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.325320 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c56d4d5-04a0-49da-8353-73a78e0775ef-kube-api-access-s6g7r" (OuterVolumeSpecName: "kube-api-access-s6g7r") pod "9c56d4d5-04a0-49da-8353-73a78e0775ef" (UID: "9c56d4d5-04a0-49da-8353-73a78e0775ef"). InnerVolumeSpecName "kube-api-access-s6g7r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.412390 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-s6g7r\" (UniqueName: \"kubernetes.io/projected/9c56d4d5-04a0-49da-8353-73a78e0775ef-kube-api-access-s6g7r\") on node \"crc\" DevicePath \"\"" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.412465 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c56d4d5-04a0-49da-8353-73a78e0775ef-config-volume\") on node \"crc\" DevicePath \"\"" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.412483 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c56d4d5-04a0-49da-8353-73a78e0775ef-secret-volume\") on node \"crc\" DevicePath \"\"" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.447148 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7598758bfb-fd8f7" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.869291 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" event={"ID":"9c56d4d5-04a0-49da-8353-73a78e0775ef","Type":"ContainerDied","Data":"305d3369502a5ff81b855d8205216b10638e1b6115a9c71bf5df28b7e5f98835"} Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.869325 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="305d3369502a5ff81b855d8205216b10638e1b6115a9c71bf5df28b7e5f98835" Mar 20 15:45:04 crc kubenswrapper[3552]: I0320 15:45:04.869680 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52" Mar 20 15:45:05 crc kubenswrapper[3552]: I0320 15:45:05.279773 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"] Mar 20 15:45:05 crc kubenswrapper[3552]: I0320 15:45:05.287358 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29555415-s64ht"] Mar 20 15:45:05 crc kubenswrapper[3552]: I0320 15:45:05.437315 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1f18111-2e43-40c4-ae3c-0f02d431999d" path="/var/lib/kubelet/pods/d1f18111-2e43-40c4-ae3c-0f02d431999d/volumes" Mar 20 15:45:06 crc kubenswrapper[3552]: I0320 15:45:06.729771 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6557559db7m9cdd" Mar 20 15:45:06 crc kubenswrapper[3552]: I0320 15:45:06.808986 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-597d5dd64-rmm62" Mar 20 15:45:12 crc kubenswrapper[3552]: I0320 15:45:12.778940 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:45:12 crc kubenswrapper[3552]: I0320 15:45:12.779608 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:45:15 crc kubenswrapper[3552]: I0320 15:45:15.819699 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-b68b4cfdf-h8ljt" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.149938 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5f88c8b7-s652j"] Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.150652 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" podNamespace="openstack" podName="dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: E0320 15:45:36.151002 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="9c56d4d5-04a0-49da-8353-73a78e0775ef" containerName="collect-profiles" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.151017 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c56d4d5-04a0-49da-8353-73a78e0775ef" containerName="collect-profiles" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.151470 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c56d4d5-04a0-49da-8353-73a78e0775ef" containerName="collect-profiles" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.152452 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.156826 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.157131 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-h7vzm" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.157343 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.161933 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5f88c8b7-s652j"] Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.165527 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.233565 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-config\") pod \"dnsmasq-dns-6d5f88c8b7-s652j\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.233616 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkm22\" (UniqueName: \"kubernetes.io/projected/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-kube-api-access-lkm22\") pod \"dnsmasq-dns-6d5f88c8b7-s652j\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.256561 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-df4645f79-c568l"] Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.256728 3552 topology_manager.go:215] "Topology Admit Handler" podUID="14bd525a-47c6-4d2e-96e5-ce83567aacb7" podNamespace="openstack" 
podName="dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.257790 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.262461 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.277321 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-df4645f79-c568l"] Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.335348 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-config\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.335423 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqbrk\" (UniqueName: \"kubernetes.io/projected/14bd525a-47c6-4d2e-96e5-ce83567aacb7-kube-api-access-sqbrk\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.335609 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-config\") pod \"dnsmasq-dns-6d5f88c8b7-s652j\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.335671 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lkm22\" (UniqueName: \"kubernetes.io/projected/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-kube-api-access-lkm22\") pod \"dnsmasq-dns-6d5f88c8b7-s652j\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.335724 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-dns-svc\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.336789 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-config\") pod \"dnsmasq-dns-6d5f88c8b7-s652j\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.354417 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkm22\" (UniqueName: \"kubernetes.io/projected/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-kube-api-access-lkm22\") pod \"dnsmasq-dns-6d5f88c8b7-s652j\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.437206 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-dns-svc\") pod 
\"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.438074 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-dns-svc\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.438238 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-config\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.439267 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-config\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.439768 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sqbrk\" (UniqueName: \"kubernetes.io/projected/14bd525a-47c6-4d2e-96e5-ce83567aacb7-kube-api-access-sqbrk\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.458901 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqbrk\" (UniqueName: \"kubernetes.io/projected/14bd525a-47c6-4d2e-96e5-ce83567aacb7-kube-api-access-sqbrk\") pod \"dnsmasq-dns-df4645f79-c568l\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.482109 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.575276 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:45:36 crc kubenswrapper[3552]: I0320 15:45:36.908182 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-df4645f79-c568l"] Mar 20 15:45:37 crc kubenswrapper[3552]: I0320 15:45:37.020098 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5f88c8b7-s652j"] Mar 20 15:45:37 crc kubenswrapper[3552]: I0320 15:45:37.067393 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-df4645f79-c568l" event={"ID":"14bd525a-47c6-4d2e-96e5-ce83567aacb7","Type":"ContainerStarted","Data":"feb1df21df52d8dec8cbaaf85ec9d8bc7f5b66ac3658da86796833684c087d9d"} Mar 20 15:45:37 crc kubenswrapper[3552]: I0320 15:45:37.068348 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" event={"ID":"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4","Type":"ContainerStarted","Data":"f08570b255b1422def3ba62069c9705d294a34066aaae8fe7f013226520ca562"} Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.116119 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5f88c8b7-s652j"] Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.144124 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5877d5c6c-2vg4l"] Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.144255 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7f7269f6-1d8b-488c-8061-7ab83faf1350" podNamespace="openstack" podName="dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.145233 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.161887 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5877d5c6c-2vg4l"] Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.288041 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-dns-svc\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.288170 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-config\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.288206 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt9lt\" (UniqueName: \"kubernetes.io/projected/7f7269f6-1d8b-488c-8061-7ab83faf1350-kube-api-access-dt9lt\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.390057 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-config\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 
15:45:39.390112 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dt9lt\" (UniqueName: \"kubernetes.io/projected/7f7269f6-1d8b-488c-8061-7ab83faf1350-kube-api-access-dt9lt\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.390146 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-dns-svc\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.391035 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-dns-svc\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.391769 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-config\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.433193 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt9lt\" (UniqueName: \"kubernetes.io/projected/7f7269f6-1d8b-488c-8061-7ab83faf1350-kube-api-access-dt9lt\") pod \"dnsmasq-dns-5877d5c6c-2vg4l\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.483880 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.484967 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-df4645f79-c568l"] Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.540200 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c8b5948c9-ldpnx"] Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.540685 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" podNamespace="openstack" podName="dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.562729 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.570373 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c8b5948c9-ldpnx"] Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.710067 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-config\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.710138 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-dns-svc\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.710167 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmjts\" (UniqueName: \"kubernetes.io/projected/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-kube-api-access-lmjts\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.812413 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-dns-svc\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.812485 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lmjts\" (UniqueName: \"kubernetes.io/projected/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-kube-api-access-lmjts\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.812551 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-config\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.814109 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-dns-svc\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.814316 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-config\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.840153 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmjts\" (UniqueName: 
\"kubernetes.io/projected/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-kube-api-access-lmjts\") pod \"dnsmasq-dns-6c8b5948c9-ldpnx\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:39 crc kubenswrapper[3552]: I0320 15:45:39.923501 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.132475 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5877d5c6c-2vg4l"] Mar 20 15:45:40 crc kubenswrapper[3552]: W0320 15:45:40.171544 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f7269f6_1d8b_488c_8061_7ab83faf1350.slice/crio-e8282289a702f997b831f916808a715959a66d141c1900f61b0560563dca3a2b WatchSource:0}: Error finding container e8282289a702f997b831f916808a715959a66d141c1900f61b0560563dca3a2b: Status 404 returned error can't find the container with id e8282289a702f997b831f916808a715959a66d141c1900f61b0560563dca3a2b Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.330983 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.331332 3552 topology_manager.go:215] "Topology Admit Handler" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" podNamespace="openstack" podName="rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.332455 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.347837 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.348898 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.349020 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.349118 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.349260 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.349342 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kr2lk" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.355718 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.362522 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.445565 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.448160 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.448308 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.448433 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.448613 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq5mc\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-kube-api-access-bq5mc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.448756 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/90a6e0ae-40a5-47b1-8495-26b369c628c4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.449091 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.449263 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.449496 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/90a6e0ae-40a5-47b1-8495-26b369c628c4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.449715 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.449845 3552 
reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551685 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551766 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551799 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551826 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551856 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bq5mc\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-kube-api-access-bq5mc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551888 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/90a6e0ae-40a5-47b1-8495-26b369c628c4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551924 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.551958 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.552623 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.552925 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/90a6e0ae-40a5-47b1-8495-26b369c628c4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.552971 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.552976 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.552999 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.563353 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.563387 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.563772 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.563834 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.570090 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " 
pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.583065 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/90a6e0ae-40a5-47b1-8495-26b369c628c4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.586974 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.589521 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq5mc\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-kube-api-access-bq5mc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.591613 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/90a6e0ae-40a5-47b1-8495-26b369c628c4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.613789 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.674174 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.675334 3552 topology_manager.go:215] "Topology Admit Handler" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" podNamespace="openstack" podName="rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.682057 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.683004 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.684900 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wt2xv" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.687002 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.687457 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.689079 3552 util.go:30] "No sandbox for pod can be found. 
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.690054 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.693076 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.695723 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.696072 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.764714 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c8b5948c9-ldpnx"]
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777216 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777276 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f1e0de34-e3ac-4691-94c4-d5ac03353099-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777333 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777373 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777418 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8ttb\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-kube-api-access-f8ttb\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777480 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f1e0de34-e3ac-4691-94c4-d5ac03353099-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0"
Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777538 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: 
\"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777577 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-config-data\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777886 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777947 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.777989 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.879514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-config-data\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.879930 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.879960 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.879990 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880024 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880052 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/f1e0de34-e3ac-4691-94c4-d5ac03353099-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880092 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880118 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880152 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f8ttb\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-kube-api-access-f8ttb\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880192 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f1e0de34-e3ac-4691-94c4-d5ac03353099-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.880233 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.881161 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.881895 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-config-data\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.882013 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.888132 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc 
kubenswrapper[3552]: I0320 15:45:40.888449 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.899449 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.899949 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.903450 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f1e0de34-e3ac-4691-94c4-d5ac03353099-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.905913 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f1e0de34-e3ac-4691-94c4-d5ac03353099-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.906077 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.925766 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:40 crc kubenswrapper[3552]: I0320 15:45:40.930516 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8ttb\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-kube-api-access-f8ttb\") pod \"rabbitmq-server-0\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " pod="openstack/rabbitmq-server-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.050600 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.159425 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" event={"ID":"7f7269f6-1d8b-488c-8061-7ab83faf1350","Type":"ContainerStarted","Data":"e8282289a702f997b831f916808a715959a66d141c1900f61b0560563dca3a2b"} Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.160752 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" event={"ID":"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8","Type":"ContainerStarted","Data":"eadab69fe9268c34df51cec198b2bffb5b8ff72a5708ca140e12b031bf70ac84"} Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.254735 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.611708 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.750609 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.750812 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b5c00f51-e5f5-4edb-8998-e4463051ecac" podNamespace="openstack" podName="openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.752246 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.755735 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-nptrq" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.755778 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.755917 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.759108 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.760579 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.766710 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.796006 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.797045 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-config-data-default\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.797230 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b5c00f51-e5f5-4edb-8998-e4463051ecac-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.798000 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b5c00f51-e5f5-4edb-8998-e4463051ecac-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.798986 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjchj\" (UniqueName: \"kubernetes.io/projected/b5c00f51-e5f5-4edb-8998-e4463051ecac-kube-api-access-gjchj\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.799067 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-kolla-config\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.799135 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c00f51-e5f5-4edb-8998-e4463051ecac-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.799214 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.900929 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b5c00f51-e5f5-4edb-8998-e4463051ecac-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901003 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gjchj\" (UniqueName: \"kubernetes.io/projected/b5c00f51-e5f5-4edb-8998-e4463051ecac-kube-api-access-gjchj\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901270 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-kolla-config\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901308 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b5c00f51-e5f5-4edb-8998-e4463051ecac-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901340 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901377 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901426 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-config-data-default\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901450 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c00f51-e5f5-4edb-8998-e4463051ecac-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901533 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b5c00f51-e5f5-4edb-8998-e4463051ecac-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.901808 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-kolla-config\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.902182 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.903779 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-config-data-default\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.907258 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5c00f51-e5f5-4edb-8998-e4463051ecac-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0" Mar 20 15:45:41 crc kubenswrapper[3552]: 
I0320 15:45:41.952141 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjchj\" (UniqueName: \"kubernetes.io/projected/b5c00f51-e5f5-4edb-8998-e4463051ecac-kube-api-access-gjchj\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0"
Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.952991 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c00f51-e5f5-4edb-8998-e4463051ecac-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0"
Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.955969 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0"
Mar 20 15:45:41 crc kubenswrapper[3552]: I0320 15:45:41.968589 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c00f51-e5f5-4edb-8998-e4463051ecac-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b5c00f51-e5f5-4edb-8998-e4463051ecac\") " pod="openstack/openstack-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.091830 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.167283 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"90a6e0ae-40a5-47b1-8495-26b369c628c4","Type":"ContainerStarted","Data":"67923631f2dd5e17336701dc732e95bab6d2b84c0567435937e8969f640a0801"}
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.778080 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.778489 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.787299 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.787444 3552 topology_manager.go:215] "Topology Admit Handler" podUID="887f0b69-2c6b-44fa-b0d4-0af7b2e89654" podNamespace="openstack" podName="openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.788466 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
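
[Editor's note] The two machine-config-daemon-zpnhg entries above record a liveness probe failing with "connection refused" on 127.0.0.1:8798, i.e. the daemon's health endpoint was not listening at that moment. A single failure only counts against the container's failureThreshold; kubelet restarts the container only after the configured number of consecutive failures. A small sketch for tallying such records across the whole log (same hypothetical kubelet.log path as above):

#!/usr/bin/env python3
"""Tally 'Probe failed' records like the machine-config-daemon one above.
Sketch only -- the kubelet.log path is an assumption; field extraction
relies on the structured key="value" form shown in this log."""
import collections
import re
import sys

counts = collections.Counter()
pat = re.compile(r'"Probe failed" probeType="([^"]+)" pod="([^"]+)"')
with open(sys.argv[1] if len(sys.argv) > 1 else "kubelet.log") as fh:
    for line in fh:
        for m in pat.finditer(line):
            counts[(m.group(2), m.group(1))] += 1

for (pod, probe), n in counts.most_common():
    print(f"{n:4d}  {probe:9s}  {pod}")
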
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.790692 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.790988 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.791028 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.791778 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-rgslt"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.809888 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921131 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921180 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921206 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b2bh\" (UniqueName: \"kubernetes.io/projected/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-kube-api-access-4b2bh\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921227 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921260 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921301 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0"
Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921337 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:42 crc kubenswrapper[3552]: I0320 15:45:42.921368 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037365 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037452 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037486 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037520 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037577 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037612 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037648 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4b2bh\" (UniqueName: \"kubernetes.io/projected/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-kube-api-access-4b2bh\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.037673 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.038809 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.038947 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.039084 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.041231 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.046083 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.067072 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.077086 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.137167 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b2bh\" (UniqueName: \"kubernetes.io/projected/887f0b69-2c6b-44fa-b0d4-0af7b2e89654-kube-api-access-4b2bh\") pod \"openstack-cell1-galera-0\" (UID: \"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.175603 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: 
\"887f0b69-2c6b-44fa-b0d4-0af7b2e89654\") " pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.275545 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.275899 3552 topology_manager.go:215] "Topology Admit Handler" podUID="762b23fe-ac75-4df1-b0b7-441f4720c635" podNamespace="openstack" podName="memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.276799 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.281269 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-jb8sp" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.281329 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.281425 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.291616 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.406463 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.447096 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/762b23fe-ac75-4df1-b0b7-441f4720c635-memcached-tls-certs\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.447140 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/762b23fe-ac75-4df1-b0b7-441f4720c635-kolla-config\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.447610 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/762b23fe-ac75-4df1-b0b7-441f4720c635-combined-ca-bundle\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.447651 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/762b23fe-ac75-4df1-b0b7-441f4720c635-config-data\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.447697 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b2zz\" (UniqueName: \"kubernetes.io/projected/762b23fe-ac75-4df1-b0b7-441f4720c635-kube-api-access-8b2zz\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.549296 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8b2zz\" (UniqueName: 
\"kubernetes.io/projected/762b23fe-ac75-4df1-b0b7-441f4720c635-kube-api-access-8b2zz\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.549398 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/762b23fe-ac75-4df1-b0b7-441f4720c635-memcached-tls-certs\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.549435 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/762b23fe-ac75-4df1-b0b7-441f4720c635-kolla-config\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.549477 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/762b23fe-ac75-4df1-b0b7-441f4720c635-combined-ca-bundle\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.549693 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/762b23fe-ac75-4df1-b0b7-441f4720c635-config-data\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.550398 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/762b23fe-ac75-4df1-b0b7-441f4720c635-config-data\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.550583 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/762b23fe-ac75-4df1-b0b7-441f4720c635-kolla-config\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.563051 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/762b23fe-ac75-4df1-b0b7-441f4720c635-memcached-tls-certs\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.563245 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/762b23fe-ac75-4df1-b0b7-441f4720c635-combined-ca-bundle\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.566924 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b2zz\" (UniqueName: \"kubernetes.io/projected/762b23fe-ac75-4df1-b0b7-441f4720c635-kube-api-access-8b2zz\") pod \"memcached-0\" (UID: \"762b23fe-ac75-4df1-b0b7-441f4720c635\") " pod="openstack/memcached-0" Mar 20 15:45:43 crc kubenswrapper[3552]: I0320 15:45:43.593311 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Mar 20 15:45:44 crc kubenswrapper[3552]: I0320 15:45:44.928542 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Mar 20 15:45:44 crc kubenswrapper[3552]: I0320 15:45:44.928677 3552 topology_manager.go:215] "Topology Admit Handler" podUID="788065ca-23a0-4317-bff9-f6503f659aee" podNamespace="openstack" podName="kube-state-metrics-0" Mar 20 15:45:44 crc kubenswrapper[3552]: I0320 15:45:44.929487 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Mar 20 15:45:44 crc kubenswrapper[3552]: I0320 15:45:44.947036 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-99jkb" Mar 20 15:45:44 crc kubenswrapper[3552]: I0320 15:45:44.953574 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Mar 20 15:45:45 crc kubenswrapper[3552]: I0320 15:45:45.078063 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz4pk\" (UniqueName: \"kubernetes.io/projected/788065ca-23a0-4317-bff9-f6503f659aee-kube-api-access-mz4pk\") pod \"kube-state-metrics-0\" (UID: \"788065ca-23a0-4317-bff9-f6503f659aee\") " pod="openstack/kube-state-metrics-0" Mar 20 15:45:45 crc kubenswrapper[3552]: I0320 15:45:45.179623 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mz4pk\" (UniqueName: \"kubernetes.io/projected/788065ca-23a0-4317-bff9-f6503f659aee-kube-api-access-mz4pk\") pod \"kube-state-metrics-0\" (UID: \"788065ca-23a0-4317-bff9-f6503f659aee\") " pod="openstack/kube-state-metrics-0" Mar 20 15:45:45 crc kubenswrapper[3552]: I0320 15:45:45.234495 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz4pk\" (UniqueName: \"kubernetes.io/projected/788065ca-23a0-4317-bff9-f6503f659aee-kube-api-access-mz4pk\") pod \"kube-state-metrics-0\" (UID: \"788065ca-23a0-4317-bff9-f6503f659aee\") " pod="openstack/kube-state-metrics-0" Mar 20 15:45:45 crc kubenswrapper[3552]: I0320 15:45:45.264724 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0"
Mar 20 15:45:46 crc kubenswrapper[3552]: W0320 15:45:46.764487 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1e0de34_e3ac_4691_94c4_d5ac03353099.slice/crio-79bb3a0438dea1fca0ed0c4c4c959e46f47c58f65ba8d5374cfa30395b26a4a4 WatchSource:0}: Error finding container 79bb3a0438dea1fca0ed0c4c4c959e46f47c58f65ba8d5374cfa30395b26a4a4: Status 404 returned error can't find the container with id 79bb3a0438dea1fca0ed0c4c4c959e46f47c58f65ba8d5374cfa30395b26a4a4
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.240536 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f1e0de34-e3ac-4691-94c4-d5ac03353099","Type":"ContainerStarted","Data":"79bb3a0438dea1fca0ed0c4c4c959e46f47c58f65ba8d5374cfa30395b26a4a4"}
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.374617 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.574797 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.687766 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Mar 20 15:45:47 crc kubenswrapper[3552]: W0320 15:45:47.692534 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5c00f51_e5f5_4edb_8998_e4463051ecac.slice/crio-37b2c85ad9945d5402cbb68e7281a516c1c56d4136cea32d7f19ce3b5cefc620 WatchSource:0}: Error finding container 37b2c85ad9945d5402cbb68e7281a516c1c56d4136cea32d7f19ce3b5cefc620: Status 404 returned error can't find the container with id 37b2c85ad9945d5402cbb68e7281a516c1c56d4136cea32d7f19ce3b5cefc620
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.775824 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.914714 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.914840 3552 topology_manager.go:215] "Topology Admit Handler" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" podNamespace="openstack" podName="prometheus-metric-storage-0"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.916382 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
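
[Editor's note] The W-level manager.go:1169 warnings above come from cAdvisor racing container startup: the crio-<id> cgroup appears before the runtime has registered the container, so the lookup returns 404. Such warnings are typically harmless when the same 64-hex container ID shows up shortly afterwards in a PLEG ContainerStarted event, as 79bb3a04... does here at 15:45:47.240536. A sketch that automates this cross-check (file path again an assumption):

#!/usr/bin/env python3
"""Cross-check 'Failed to process watch event ... 404' warnings: if the
same container id later appears in a PLEG ContainerStarted event, the
warning was a harmless startup race. Sketch; paths are assumptions."""
import re
import sys

warned, started = {}, set()
id_re = re.compile(r"can't find the container with id ([0-9a-f]{64})")
pleg_re = re.compile(r'"Type":"ContainerStarted","Data":"([0-9a-f]{64})"')
with open(sys.argv[1] if len(sys.argv) > 1 else "kubelet.log") as fh:
    for line in fh:
        if (m := id_re.search(line)):
            warned.setdefault(m.group(1), line[:15])  # keep first timestamp
        if (m := pleg_re.search(line)):
            started.add(m.group(1))

for cid, ts in warned.items():
    verdict = "resolved by ContainerStarted" if cid in started else "UNRESOLVED"
    print(f"{ts}  {cid[:12]}  {verdict}")
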
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.920550 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.922646 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.923103 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.923234 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.923262 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.923342 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.923438 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-s2p59"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.938930 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Mar 20 15:45:47 crc kubenswrapper[3552]: I0320 15:45:47.947783 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034025 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034102 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n8k6\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-kube-api-access-2n8k6\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034131 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034155 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034201 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034222 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034254 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034279 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034304 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.034337 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.135712 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136135 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136176 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-0\") 
pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136261 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136299 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2n8k6\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-kube-api-access-2n8k6\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136326 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136351 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136401 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.136452 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.137940 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.138179 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" 
(UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.138471 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.143513 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.144259 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.144584 3552 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.144682 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c127805bcb575cbe31260fab01e798009882e8c5a15f13517ae73f993a199ea5/globalmount\"" pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.144866 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.147781 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.147876 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.156869 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n8k6\" (UniqueName: 
\"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-kube-api-access-2n8k6\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.190608 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.237669 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.245894 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"788065ca-23a0-4317-bff9-f6503f659aee","Type":"ContainerStarted","Data":"4fe62e7b0308a0002367b20fdb23ed98f3e5963ea4862d9a800f378effdd1efe"} Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.247211 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"887f0b69-2c6b-44fa-b0d4-0af7b2e89654","Type":"ContainerStarted","Data":"6c7f3ee202afd27beecbfbe6b694c458ed4aa66f8915b25b6b35b0dbd471d7ab"} Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.248648 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b5c00f51-e5f5-4edb-8998-e4463051ecac","Type":"ContainerStarted","Data":"37b2c85ad9945d5402cbb68e7281a516c1c56d4136cea32d7f19ce3b5cefc620"} Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.249631 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"762b23fe-ac75-4df1-b0b7-441f4720c635","Type":"ContainerStarted","Data":"9fba9be1be69e6302587bd969a100acfcceee8392723c8535429988d1c6798c9"} Mar 20 15:45:48 crc kubenswrapper[3552]: I0320 15:45:48.779258 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.028056 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-c89mf"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.028201 3552 topology_manager.go:215] "Topology Admit Handler" podUID="854348bd-6351-4ba6-82c7-664311074caf" podNamespace="openstack" podName="ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.029028 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.032169 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.032449 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.033020 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-fp7lc" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.052909 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c89mf"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.084340 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-qrf8z"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.084733 3552 topology_manager.go:215] "Topology Admit Handler" podUID="af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0" podNamespace="openstack" podName="ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.096299 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.122808 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-qrf8z"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155367 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fskxr\" (UniqueName: \"kubernetes.io/projected/854348bd-6351-4ba6-82c7-664311074caf-kube-api-access-fskxr\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155450 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/854348bd-6351-4ba6-82c7-664311074caf-combined-ca-bundle\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155629 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-log-ovn\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155703 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/854348bd-6351-4ba6-82c7-664311074caf-scripts\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155737 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/854348bd-6351-4ba6-82c7-664311074caf-ovn-controller-tls-certs\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155787 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-run\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.155831 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-run-ovn\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.257144 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzhkn\" (UniqueName: \"kubernetes.io/projected/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-kube-api-access-tzhkn\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.257249 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-log\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.257370 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-log-ovn\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258003 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-log-ovn\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258327 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-etc-ovs\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258363 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/854348bd-6351-4ba6-82c7-664311074caf-scripts\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258384 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/854348bd-6351-4ba6-82c7-664311074caf-ovn-controller-tls-certs\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258436 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-run\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258479 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-run-ovn\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258510 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fskxr\" (UniqueName: \"kubernetes.io/projected/854348bd-6351-4ba6-82c7-664311074caf-kube-api-access-fskxr\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258531 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-lib\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258555 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/854348bd-6351-4ba6-82c7-664311074caf-combined-ca-bundle\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258588 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-scripts\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.258626 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-run\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.260652 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/854348bd-6351-4ba6-82c7-664311074caf-scripts\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.261607 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-run\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.261717 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/854348bd-6351-4ba6-82c7-664311074caf-var-run-ovn\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 
15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.267528 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/854348bd-6351-4ba6-82c7-664311074caf-ovn-controller-tls-certs\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.274820 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/854348bd-6351-4ba6-82c7-664311074caf-combined-ca-bundle\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.285031 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fskxr\" (UniqueName: \"kubernetes.io/projected/854348bd-6351-4ba6-82c7-664311074caf-kube-api-access-fskxr\") pod \"ovn-controller-c89mf\" (UID: \"854348bd-6351-4ba6-82c7-664311074caf\") " pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.351240 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.359750 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-log\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.359838 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-etc-ovs\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.359906 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-lib\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.359940 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-scripts\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.359982 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-run\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.360020 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tzhkn\" (UniqueName: \"kubernetes.io/projected/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-kube-api-access-tzhkn\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc 
kubenswrapper[3552]: I0320 15:45:49.360710 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-log\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.360885 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-etc-ovs\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.361014 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-lib\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.363798 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-scripts\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.363943 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-var-run\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.378369 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzhkn\" (UniqueName: \"kubernetes.io/projected/af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0-kube-api-access-tzhkn\") pod \"ovn-controller-ovs-qrf8z\" (UID: \"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0\") " pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.465692 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:45:49 crc kubenswrapper[3552]: W0320 15:45:49.516183 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfdedd79c_7b6f_4512_a434_dc6c4daf6bfb.slice/crio-d35b3d71b97b1531105c59d9b650be88a7046ba4b8e14941bc4112f9b0affae6 WatchSource:0}: Error finding container d35b3d71b97b1531105c59d9b650be88a7046ba4b8e14941bc4112f9b0affae6: Status 404 returned error can't find the container with id d35b3d71b97b1531105c59d9b650be88a7046ba4b8e14941bc4112f9b0affae6 Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.723826 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.724006 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a03ab7e3-7def-4bd8-9ec5-93e9b3098b08" podNamespace="openstack" podName="ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.725475 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.732199 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.732253 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.732394 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tzs8w" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.732566 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.734053 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.739869 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880135 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880197 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880465 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-config\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880504 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880533 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2kfh\" (UniqueName: \"kubernetes.io/projected/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-kube-api-access-v2kfh\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880554 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880589 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.880618 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987172 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987228 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987275 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-config\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987303 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987324 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-v2kfh\" (UniqueName: \"kubernetes.io/projected/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-kube-api-access-v2kfh\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987344 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987374 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.987396 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 
15:45:49.993219 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.995789 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.996444 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-config\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.996697 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.997901 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:49 crc kubenswrapper[3552]: I0320 15:45:49.998898 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:50 crc kubenswrapper[3552]: I0320 15:45:50.007281 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:50 crc kubenswrapper[3552]: I0320 15:45:50.039182 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2kfh\" (UniqueName: \"kubernetes.io/projected/a03ab7e3-7def-4bd8-9ec5-93e9b3098b08-kube-api-access-v2kfh\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:50 crc kubenswrapper[3552]: I0320 15:45:50.070430 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08\") " pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:50 crc kubenswrapper[3552]: I0320 15:45:50.280622 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerStarted","Data":"d35b3d71b97b1531105c59d9b650be88a7046ba4b8e14941bc4112f9b0affae6"} Mar 20 15:45:50 crc kubenswrapper[3552]: I0320 
15:45:50.353850 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.588475 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.588629 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e4d74fff-0397-4b4c-ac20-6fd72086c84a" podNamespace="openstack" podName="ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.589955 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.595263 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.595579 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.595734 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.596326 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-gdz42" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.606786 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.713812 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4d74fff-0397-4b4c-ac20-6fd72086c84a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.713867 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-676g6\" (UniqueName: \"kubernetes.io/projected/e4d74fff-0397-4b4c-ac20-6fd72086c84a-kube-api-access-676g6\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.713961 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.714065 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.714305 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4d74fff-0397-4b4c-ac20-6fd72086c84a-config\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.714501 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.714558 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e4d74fff-0397-4b4c-ac20-6fd72086c84a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.714595 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816515 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-676g6\" (UniqueName: \"kubernetes.io/projected/e4d74fff-0397-4b4c-ac20-6fd72086c84a-kube-api-access-676g6\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816583 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816614 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816676 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4d74fff-0397-4b4c-ac20-6fd72086c84a-config\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816721 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816749 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e4d74fff-0397-4b4c-ac20-6fd72086c84a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816778 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: 
\"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.816838 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4d74fff-0397-4b4c-ac20-6fd72086c84a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.817134 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.817606 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e4d74fff-0397-4b4c-ac20-6fd72086c84a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.818061 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4d74fff-0397-4b4c-ac20-6fd72086c84a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.818334 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4d74fff-0397-4b4c-ac20-6fd72086c84a-config\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.825603 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.843609 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.847335 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.847361 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4d74fff-0397-4b4c-ac20-6fd72086c84a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.850238 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-676g6\" (UniqueName: \"kubernetes.io/projected/e4d74fff-0397-4b4c-ac20-6fd72086c84a-kube-api-access-676g6\") pod 
\"ovsdbserver-sb-0\" (UID: \"e4d74fff-0397-4b4c-ac20-6fd72086c84a\") " pod="openstack/ovsdbserver-sb-0" Mar 20 15:45:51 crc kubenswrapper[3552]: I0320 15:45:51.960856 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Mar 20 15:46:01 crc kubenswrapper[3552]: I0320 15:46:01.301968 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:46:01 crc kubenswrapper[3552]: I0320 15:46:01.302739 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:46:01 crc kubenswrapper[3552]: I0320 15:46:01.302773 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:46:01 crc kubenswrapper[3552]: I0320 15:46:01.302800 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:46:01 crc kubenswrapper[3552]: I0320 15:46:01.302845 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:46:02 crc kubenswrapper[3552]: E0320 15:46:02.009029 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135\": container with ID starting with a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135 not found: ID does not exist" containerID="a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135" Mar 20 15:46:02 crc kubenswrapper[3552]: I0320 15:46:02.009287 3552 kuberuntime_gc.go:360] "Error getting ContainerStatus for containerID" containerID="a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135" err="rpc error: code = NotFound desc = could not find container \"a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135\": container with ID starting with a96f1bdd3d3da5b833f5785f66a7296093ca7bb0661de3761496fcc7c9d5c135 not found: ID does not exist" Mar 20 15:46:09 crc kubenswrapper[3552]: I0320 15:46:09.978009 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-qrf8z"] Mar 20 15:46:10 crc kubenswrapper[3552]: W0320 15:46:10.083635 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf01ed61_3eb6_4ca7_b4c4_ca71ab654dd0.slice/crio-009ce5b29ab477a3bf6611c76bc4e767f1fd25d49687958ec7d6c695b49e6249 WatchSource:0}: Error finding container 009ce5b29ab477a3bf6611c76bc4e767f1fd25d49687958ec7d6c695b49e6249: Status 404 returned error can't find the container with id 009ce5b29ab477a3bf6611c76bc4e767f1fd25d49687958ec7d6c695b49e6249 Mar 20 15:46:10 crc kubenswrapper[3552]: I0320 15:46:10.136131 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Mar 20 15:46:10 crc kubenswrapper[3552]: I0320 15:46:10.441043 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Mar 20 15:46:10 crc kubenswrapper[3552]: I0320 15:46:10.469621 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c89mf"] Mar 20 15:46:10 crc kubenswrapper[3552]: I0320 15:46:10.520556 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"e4d74fff-0397-4b4c-ac20-6fd72086c84a","Type":"ContainerStarted","Data":"cbc7328b40fd6ad6e6463c9a4f750de36eb743330866bc2d21157c844717575a"} Mar 20 15:46:10 crc kubenswrapper[3552]: I0320 15:46:10.528438 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qrf8z" event={"ID":"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0","Type":"ContainerStarted","Data":"009ce5b29ab477a3bf6611c76bc4e767f1fd25d49687958ec7d6c695b49e6249"} Mar 20 15:46:11 crc kubenswrapper[3552]: I0320 15:46:11.536172 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08","Type":"ContainerStarted","Data":"a2f5c7d7b8aace050b4861195b00c06ff82260cfc30bf7c98de5e8e9a8115163"} Mar 20 15:46:11 crc kubenswrapper[3552]: I0320 15:46:11.538279 3552 generic.go:334] "Generic (PLEG): container finished" podID="6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" containerID="2f16e8ba0fbfac56cc011f584e07a0ccf767141d99fa730000d0bce75b65c301" exitCode=0 Mar 20 15:46:11 crc kubenswrapper[3552]: I0320 15:46:11.538330 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" event={"ID":"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4","Type":"ContainerDied","Data":"2f16e8ba0fbfac56cc011f584e07a0ccf767141d99fa730000d0bce75b65c301"} Mar 20 15:46:11 crc kubenswrapper[3552]: I0320 15:46:11.540103 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf" event={"ID":"854348bd-6351-4ba6-82c7-664311074caf","Type":"ContainerStarted","Data":"ce4fbef7e43fdb224eb1d7cd96272e4cc9cd52052319885c17d35cee9dc2705a"} Mar 20 15:46:11 crc kubenswrapper[3552]: I0320 15:46:11.916737 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.058899 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkm22\" (UniqueName: \"kubernetes.io/projected/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-kube-api-access-lkm22\") pod \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.059115 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-config\") pod \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\" (UID: \"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4\") " Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.126314 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-kube-api-access-lkm22" (OuterVolumeSpecName: "kube-api-access-lkm22") pod "6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" (UID: "6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4"). InnerVolumeSpecName "kube-api-access-lkm22". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.147126 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-config" (OuterVolumeSpecName: "config") pod "6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" (UID: "6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.161528 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.161582 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-lkm22\" (UniqueName: \"kubernetes.io/projected/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4-kube-api-access-lkm22\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.334177 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-ztxhh"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.334342 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9d5405cc-3ba4-46fd-b566-be99e325e65c" podNamespace="openstack" podName="ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: E0320 15:46:12.334610 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" containerName="init" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.334626 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" containerName="init" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.334807 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" containerName="init" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.335618 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.342506 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.362295 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-ztxhh"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.465150 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5405cc-3ba4-46fd-b566-be99e325e65c-combined-ca-bundle\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.465247 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d5405cc-3ba4-46fd-b566-be99e325e65c-config\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.465306 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d5405cc-3ba4-46fd-b566-be99e325e65c-ovs-rundir\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.465584 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d5405cc-3ba4-46fd-b566-be99e325e65c-ovn-rundir\") pod 
\"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.465688 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkn5g\" (UniqueName: \"kubernetes.io/projected/9d5405cc-3ba4-46fd-b566-be99e325e65c-kube-api-access-vkn5g\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.465865 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d5405cc-3ba4-46fd-b566-be99e325e65c-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.518338 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5877d5c6c-2vg4l"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.551600 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b5c00f51-e5f5-4edb-8998-e4463051ecac","Type":"ContainerStarted","Data":"2e5157249cf717e6416fd1979b039c07fe2e74ae97043cfe1289d09a11533a30"} Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.553943 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"762b23fe-ac75-4df1-b0b7-441f4720c635","Type":"ContainerStarted","Data":"7efe886318e486e7b0cf3da952db1b90557fdc888fc42790b2348f3b29859f16"} Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.554224 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.555367 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d98d94d89-w98z9"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.555518 3552 topology_manager.go:215] "Topology Admit Handler" podUID="261f94f4-62eb-4777-9692-7e956509fe50" podNamespace="openstack" podName="dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.556762 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.556834 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" event={"ID":"6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4","Type":"ContainerDied","Data":"f08570b255b1422def3ba62069c9705d294a34066aaae8fe7f013226520ca562"} Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.556870 3552 scope.go:117] "RemoveContainer" containerID="2f16e8ba0fbfac56cc011f584e07a0ccf767141d99fa730000d0bce75b65c301" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.557003 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5f88c8b7-s652j" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.559718 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.566989 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d5405cc-3ba4-46fd-b566-be99e325e65c-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.567129 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5405cc-3ba4-46fd-b566-be99e325e65c-combined-ca-bundle\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.567181 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d5405cc-3ba4-46fd-b566-be99e325e65c-config\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.567217 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d5405cc-3ba4-46fd-b566-be99e325e65c-ovs-rundir\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.567248 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d5405cc-3ba4-46fd-b566-be99e325e65c-ovn-rundir\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.567287 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vkn5g\" (UniqueName: \"kubernetes.io/projected/9d5405cc-3ba4-46fd-b566-be99e325e65c-kube-api-access-vkn5g\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.572804 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d98d94d89-w98z9"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.579346 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d5405cc-3ba4-46fd-b566-be99e325e65c-ovs-rundir\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.580395 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d5405cc-3ba4-46fd-b566-be99e325e65c-config\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.581163 3552 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d5405cc-3ba4-46fd-b566-be99e325e65c-ovn-rundir\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.581698 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d5405cc-3ba4-46fd-b566-be99e325e65c-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.588446 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5405cc-3ba4-46fd-b566-be99e325e65c-combined-ca-bundle\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.606730 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkn5g\" (UniqueName: \"kubernetes.io/projected/9d5405cc-3ba4-46fd-b566-be99e325e65c-kube-api-access-vkn5g\") pod \"ovn-controller-metrics-ztxhh\" (UID: \"9d5405cc-3ba4-46fd-b566-be99e325e65c\") " pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.659899 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-ztxhh" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.662977 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=7.537924909 podStartE2EDuration="29.662923031s" podCreationTimestamp="2026-03-20 15:45:43 +0000 UTC" firstStartedPulling="2026-03-20 15:45:47.78196239 +0000 UTC m=+1247.475659220" lastFinishedPulling="2026-03-20 15:46:09.906960512 +0000 UTC m=+1269.600657342" observedRunningTime="2026-03-20 15:46:12.662087059 +0000 UTC m=+1272.355783889" watchObservedRunningTime="2026-03-20 15:46:12.662923031 +0000 UTC m=+1272.356619861" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.668957 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-dns-svc\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.669206 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-ovsdbserver-nb\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.669261 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gplx\" (UniqueName: \"kubernetes.io/projected/261f94f4-62eb-4777-9692-7e956509fe50-kube-api-access-4gplx\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 
15:46:12.669370 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-config\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.703050 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c8b5948c9-ldpnx"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.730346 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5f88c8b7-s652j"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.754432 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5f88c8b7-s652j"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.767909 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74cfff8f4c-rcmqm"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.768061 3552 topology_manager.go:215] "Topology Admit Handler" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" podNamespace="openstack" podName="dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.769837 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.770658 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-config\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.770733 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-dns-svc\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.770766 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-ovsdbserver-nb\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.770804 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4gplx\" (UniqueName: \"kubernetes.io/projected/261f94f4-62eb-4777-9692-7e956509fe50-kube-api-access-4gplx\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.771828 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-config\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.772445 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-dns-svc\") pod 
\"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.773104 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-ovsdbserver-nb\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.774554 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.778927 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.778985 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.779016 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.779814 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f985d384e5c2566938c7d880ef875555c77af84a50df3c9a9abd4000ed8661c"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.779973 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://7f985d384e5c2566938c7d880ef875555c77af84a50df3c9a9abd4000ed8661c" gracePeriod=600 Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.782803 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74cfff8f4c-rcmqm"] Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.791513 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gplx\" (UniqueName: \"kubernetes.io/projected/261f94f4-62eb-4777-9692-7e956509fe50-kube-api-access-4gplx\") pod \"dnsmasq-dns-6d98d94d89-w98z9\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.872182 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-config\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.872303 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-nb\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.872329 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-sb\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.872366 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxxz5\" (UniqueName: \"kubernetes.io/projected/706952c0-5b18-4aed-9ce1-d503bba5ba52-kube-api-access-nxxz5\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.872394 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-dns-svc\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.947724 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.974034 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-nb\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.974367 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-sb\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.974525 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nxxz5\" (UniqueName: \"kubernetes.io/projected/706952c0-5b18-4aed-9ce1-d503bba5ba52-kube-api-access-nxxz5\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.974682 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-dns-svc\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.974840 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-config\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " 
pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.975901 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-config\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.976250 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-sb\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.976932 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-dns-svc\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.977246 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-nb\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:12 crc kubenswrapper[3552]: I0320 15:46:12.994639 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxxz5\" (UniqueName: \"kubernetes.io/projected/706952c0-5b18-4aed-9ce1-d503bba5ba52-kube-api-access-nxxz5\") pod \"dnsmasq-dns-74cfff8f4c-rcmqm\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:13 crc kubenswrapper[3552]: I0320 15:46:13.095037 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:13 crc kubenswrapper[3552]: I0320 15:46:13.439087 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4" path="/var/lib/kubelet/pods/6562700d-6d6a-4b9c-a0d4-bf2df2f0a0e4/volumes" Mar 20 15:46:16 crc kubenswrapper[3552]: I0320 15:46:16.614372 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"90a6e0ae-40a5-47b1-8495-26b369c628c4","Type":"ContainerStarted","Data":"51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6"} Mar 20 15:46:16 crc kubenswrapper[3552]: I0320 15:46:16.622188 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="7f985d384e5c2566938c7d880ef875555c77af84a50df3c9a9abd4000ed8661c" exitCode=0 Mar 20 15:46:16 crc kubenswrapper[3552]: I0320 15:46:16.622265 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"7f985d384e5c2566938c7d880ef875555c77af84a50df3c9a9abd4000ed8661c"} Mar 20 15:46:16 crc kubenswrapper[3552]: I0320 15:46:16.622314 3552 scope.go:117] "RemoveContainer" containerID="902094328c1e9daadeb5f64e0a47c03d126a08ca4a0366a4ea4ca5f17a2975d1" Mar 20 15:46:16 crc kubenswrapper[3552]: E0320 15:46:16.981657 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f7269f6_1d8b_488c_8061_7ab83faf1350.slice/crio-cfa895c0cc73b51f4f7d430d9a964c82b31cba56e6a4b2db4029d30819d07a2a.scope\": RecentStats: unable to find data in memory cache]" Mar 20 15:46:16 crc kubenswrapper[3552]: I0320 15:46:16.997690 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d98d94d89-w98z9"] Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.121276 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74cfff8f4c-rcmqm"] Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.187355 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-ztxhh"] Mar 20 15:46:17 crc kubenswrapper[3552]: W0320 15:46:17.242025 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod261f94f4_62eb_4777_9692_7e956509fe50.slice/crio-94e59dc21c4b86179dd3cfea0213d3954b24324bfb5219ffeff145de3a2db7d4 WatchSource:0}: Error finding container 94e59dc21c4b86179dd3cfea0213d3954b24324bfb5219ffeff145de3a2db7d4: Status 404 returned error can't find the container with id 94e59dc21c4b86179dd3cfea0213d3954b24324bfb5219ffeff145de3a2db7d4 Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.640986 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"887f0b69-2c6b-44fa-b0d4-0af7b2e89654","Type":"ContainerStarted","Data":"11abd0bb0354d18a891e9cf21b4ce4277f794b9f376d6d742cc65272dba88173"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.643954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-ztxhh" event={"ID":"9d5405cc-3ba4-46fd-b566-be99e325e65c","Type":"ContainerStarted","Data":"a750f490ea95a84b64dfcd11eac06d367f9e8d9914d6e231270eec928ea2733c"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.646747 3552 
generic.go:334] "Generic (PLEG): container finished" podID="7f7269f6-1d8b-488c-8061-7ab83faf1350" containerID="cfa895c0cc73b51f4f7d430d9a964c82b31cba56e6a4b2db4029d30819d07a2a" exitCode=0 Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.646938 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" event={"ID":"7f7269f6-1d8b-488c-8061-7ab83faf1350","Type":"ContainerDied","Data":"cfa895c0cc73b51f4f7d430d9a964c82b31cba56e6a4b2db4029d30819d07a2a"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.649632 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"3e2f09fb5251918a00eedeadde2f6289e4a42a00c71de21ae5afa976b5070f51"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.650933 3552 generic.go:334] "Generic (PLEG): container finished" podID="2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" containerID="f3305534fa9a4724e032a1d04d41f86d90828c6f7eef88d51a34d24a9437484f" exitCode=0 Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.651004 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" event={"ID":"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8","Type":"ContainerDied","Data":"f3305534fa9a4724e032a1d04d41f86d90828c6f7eef88d51a34d24a9437484f"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.653327 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" event={"ID":"261f94f4-62eb-4777-9692-7e956509fe50","Type":"ContainerStarted","Data":"94e59dc21c4b86179dd3cfea0213d3954b24324bfb5219ffeff145de3a2db7d4"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.654219 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" event={"ID":"706952c0-5b18-4aed-9ce1-d503bba5ba52","Type":"ContainerStarted","Data":"fda3e1db6c6d9e91e3da95335c77cb2368261e71b2d7d37bf9d64092f0b97dae"} Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.655432 3552 generic.go:334] "Generic (PLEG): container finished" podID="14bd525a-47c6-4d2e-96e5-ce83567aacb7" containerID="3ae9d1ba1b3f517c3eb633d7a47c05e5ed7658d343f2fa2332a884767e44b24f" exitCode=0 Mar 20 15:46:17 crc kubenswrapper[3552]: I0320 15:46:17.655938 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-df4645f79-c568l" event={"ID":"14bd525a-47c6-4d2e-96e5-ce83567aacb7","Type":"ContainerDied","Data":"3ae9d1ba1b3f517c3eb633d7a47c05e5ed7658d343f2fa2332a884767e44b24f"} Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.040916 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.086294 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-dns-svc\") pod \"7f7269f6-1d8b-488c-8061-7ab83faf1350\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.087164 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-config\") pod \"7f7269f6-1d8b-488c-8061-7ab83faf1350\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.087244 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dt9lt\" (UniqueName: \"kubernetes.io/projected/7f7269f6-1d8b-488c-8061-7ab83faf1350-kube-api-access-dt9lt\") pod \"7f7269f6-1d8b-488c-8061-7ab83faf1350\" (UID: \"7f7269f6-1d8b-488c-8061-7ab83faf1350\") " Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.096455 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f7269f6-1d8b-488c-8061-7ab83faf1350-kube-api-access-dt9lt" (OuterVolumeSpecName: "kube-api-access-dt9lt") pod "7f7269f6-1d8b-488c-8061-7ab83faf1350" (UID: "7f7269f6-1d8b-488c-8061-7ab83faf1350"). InnerVolumeSpecName "kube-api-access-dt9lt". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.149568 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-config" (OuterVolumeSpecName: "config") pod "7f7269f6-1d8b-488c-8061-7ab83faf1350" (UID: "7f7269f6-1d8b-488c-8061-7ab83faf1350"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.167901 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7f7269f6-1d8b-488c-8061-7ab83faf1350" (UID: "7f7269f6-1d8b-488c-8061-7ab83faf1350"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.190128 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.190175 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f7269f6-1d8b-488c-8061-7ab83faf1350-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.190194 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-dt9lt\" (UniqueName: \"kubernetes.io/projected/7f7269f6-1d8b-488c-8061-7ab83faf1350-kube-api-access-dt9lt\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.594548 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.677600 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" event={"ID":"7f7269f6-1d8b-488c-8061-7ab83faf1350","Type":"ContainerDied","Data":"e8282289a702f997b831f916808a715959a66d141c1900f61b0560563dca3a2b"} Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.677665 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5877d5c6c-2vg4l" Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.690525 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f1e0de34-e3ac-4691-94c4-d5ac03353099","Type":"ContainerStarted","Data":"d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021"} Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.797040 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5877d5c6c-2vg4l"] Mar 20 15:46:18 crc kubenswrapper[3552]: I0320 15:46:18.810381 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5877d5c6c-2vg4l"] Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.445742 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f7269f6-1d8b-488c-8061-7ab83faf1350" path="/var/lib/kubelet/pods/7f7269f6-1d8b-488c-8061-7ab83faf1350/volumes" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.698686 3552 generic.go:334] "Generic (PLEG): container finished" podID="b5c00f51-e5f5-4edb-8998-e4463051ecac" containerID="2e5157249cf717e6416fd1979b039c07fe2e74ae97043cfe1289d09a11533a30" exitCode=0 Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.698753 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b5c00f51-e5f5-4edb-8998-e4463051ecac","Type":"ContainerDied","Data":"2e5157249cf717e6416fd1979b039c07fe2e74ae97043cfe1289d09a11533a30"} Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.746831 3552 scope.go:117] "RemoveContainer" containerID="cfa895c0cc73b51f4f7d430d9a964c82b31cba56e6a4b2db4029d30819d07a2a" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.765252 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.771022 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.921877 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqbrk\" (UniqueName: \"kubernetes.io/projected/14bd525a-47c6-4d2e-96e5-ce83567aacb7-kube-api-access-sqbrk\") pod \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.922042 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-config\") pod \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.922109 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-dns-svc\") pod \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.922141 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmjts\" (UniqueName: \"kubernetes.io/projected/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-kube-api-access-lmjts\") pod \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.922192 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-config\") pod \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\" (UID: \"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8\") " Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.922266 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-dns-svc\") pod \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\" (UID: \"14bd525a-47c6-4d2e-96e5-ce83567aacb7\") " Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.928741 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-kube-api-access-lmjts" (OuterVolumeSpecName: "kube-api-access-lmjts") pod "2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" (UID: "2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8"). InnerVolumeSpecName "kube-api-access-lmjts". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.938707 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14bd525a-47c6-4d2e-96e5-ce83567aacb7-kube-api-access-sqbrk" (OuterVolumeSpecName: "kube-api-access-sqbrk") pod "14bd525a-47c6-4d2e-96e5-ce83567aacb7" (UID: "14bd525a-47c6-4d2e-96e5-ce83567aacb7"). InnerVolumeSpecName "kube-api-access-sqbrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.948448 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "14bd525a-47c6-4d2e-96e5-ce83567aacb7" (UID: "14bd525a-47c6-4d2e-96e5-ce83567aacb7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.956434 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-config" (OuterVolumeSpecName: "config") pod "14bd525a-47c6-4d2e-96e5-ce83567aacb7" (UID: "14bd525a-47c6-4d2e-96e5-ce83567aacb7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.958915 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" (UID: "2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:19 crc kubenswrapper[3552]: I0320 15:46:19.967236 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-config" (OuterVolumeSpecName: "config") pod "2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" (UID: "2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.025379 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.025442 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-lmjts\" (UniqueName: \"kubernetes.io/projected/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-kube-api-access-lmjts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.025459 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.025475 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.025490 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-sqbrk\" (UniqueName: \"kubernetes.io/projected/14bd525a-47c6-4d2e-96e5-ce83567aacb7-kube-api-access-sqbrk\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.025505 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14bd525a-47c6-4d2e-96e5-ce83567aacb7-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.706726 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-df4645f79-c568l" event={"ID":"14bd525a-47c6-4d2e-96e5-ce83567aacb7","Type":"ContainerDied","Data":"feb1df21df52d8dec8cbaaf85ec9d8bc7f5b66ac3658da86796833684c087d9d"} Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.706759 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-df4645f79-c568l" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.708874 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerStarted","Data":"8b1aca7237e1fe93905bf1dba4286ca6f021a7ed41f1700eac9be0ffda92d03d"} Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.714234 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" event={"ID":"2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8","Type":"ContainerDied","Data":"eadab69fe9268c34df51cec198b2bffb5b8ff72a5708ca140e12b031bf70ac84"} Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.714281 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c8b5948c9-ldpnx" Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.807515 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c8b5948c9-ldpnx"] Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.826399 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c8b5948c9-ldpnx"] Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.838888 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-df4645f79-c568l"] Mar 20 15:46:20 crc kubenswrapper[3552]: I0320 15:46:20.844104 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-df4645f79-c568l"] Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.211919 3552 scope.go:117] "RemoveContainer" containerID="3ae9d1ba1b3f517c3eb633d7a47c05e5ed7658d343f2fa2332a884767e44b24f" Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.371790 3552 scope.go:117] "RemoveContainer" containerID="f3305534fa9a4724e032a1d04d41f86d90828c6f7eef88d51a34d24a9437484f" Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.445223 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14bd525a-47c6-4d2e-96e5-ce83567aacb7" path="/var/lib/kubelet/pods/14bd525a-47c6-4d2e-96e5-ce83567aacb7/volumes" Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.446500 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" path="/var/lib/kubelet/pods/2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8/volumes" Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.726367 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b5c00f51-e5f5-4edb-8998-e4463051ecac","Type":"ContainerStarted","Data":"46319061018bfe1547da2f7b7cd71007bfc25646f248415f486530481e093451"} Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.734676 3552 generic.go:334] "Generic (PLEG): container finished" podID="887f0b69-2c6b-44fa-b0d4-0af7b2e89654" containerID="11abd0bb0354d18a891e9cf21b4ce4277f794b9f376d6d742cc65272dba88173" exitCode=0 Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.734771 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"887f0b69-2c6b-44fa-b0d4-0af7b2e89654","Type":"ContainerDied","Data":"11abd0bb0354d18a891e9cf21b4ce4277f794b9f376d6d742cc65272dba88173"} Mar 20 15:46:21 crc kubenswrapper[3552]: I0320 15:46:21.762115 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=19.914848902 podStartE2EDuration="41.762045298s" podCreationTimestamp="2026-03-20 15:45:40 +0000 
UTC" firstStartedPulling="2026-03-20 15:45:47.695616489 +0000 UTC m=+1247.389313319" lastFinishedPulling="2026-03-20 15:46:09.542812885 +0000 UTC m=+1269.236509715" observedRunningTime="2026-03-20 15:46:21.754306971 +0000 UTC m=+1281.448003811" watchObservedRunningTime="2026-03-20 15:46:21.762045298 +0000 UTC m=+1281.455742128" Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.092921 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.092975 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.744789 3552 generic.go:334] "Generic (PLEG): container finished" podID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerID="82b3e5724ad7e853f871e22ac1c5814773597bcfd0102fdca864ce9614a293d8" exitCode=0 Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.744911 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" event={"ID":"706952c0-5b18-4aed-9ce1-d503bba5ba52","Type":"ContainerDied","Data":"82b3e5724ad7e853f871e22ac1c5814773597bcfd0102fdca864ce9614a293d8"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.750099 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"887f0b69-2c6b-44fa-b0d4-0af7b2e89654","Type":"ContainerStarted","Data":"3af4038d69bd0cc70f7f01d2e48d3d1f5723c9e739cdf72c5b36bdee3b0199f8"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.761711 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf" event={"ID":"854348bd-6351-4ba6-82c7-664311074caf","Type":"ContainerStarted","Data":"3901e8bd9668ef5a756cd2d80d95b27917714106c011fa513cb1ff732864bdbc"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.762690 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-c89mf" Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.790655 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e4d74fff-0397-4b4c-ac20-6fd72086c84a","Type":"ContainerStarted","Data":"f8440e0b92d487b425febaec25a711e6b08f0a0c581d1ace01b65cc7188f65b7"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.799681 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08","Type":"ContainerStarted","Data":"48f7b0c1da1c280808572dd786e940431124bf1f793dacd1026316197fe00949"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.800575 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=19.277563022 podStartE2EDuration="41.800544586s" podCreationTimestamp="2026-03-20 15:45:41 +0000 UTC" firstStartedPulling="2026-03-20 15:45:47.408270739 +0000 UTC m=+1247.101967569" lastFinishedPulling="2026-03-20 15:46:09.931252303 +0000 UTC m=+1269.624949133" observedRunningTime="2026-03-20 15:46:22.796382934 +0000 UTC m=+1282.490079784" watchObservedRunningTime="2026-03-20 15:46:22.800544586 +0000 UTC m=+1282.494241416" Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.806269 3552 generic.go:334] "Generic (PLEG): container finished" podID="261f94f4-62eb-4777-9692-7e956509fe50" containerID="74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b" exitCode=0 Mar 20 15:46:22 crc kubenswrapper[3552]: 
I0320 15:46:22.806358 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" event={"ID":"261f94f4-62eb-4777-9692-7e956509fe50","Type":"ContainerDied","Data":"74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.810568 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"788065ca-23a0-4317-bff9-f6503f659aee","Type":"ContainerStarted","Data":"dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.812985 3552 generic.go:334] "Generic (PLEG): container finished" podID="af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0" containerID="5505e4bce0650de09e2405f0f03ab308e9653d7bf926e1de3c6fb6ff62d8259a" exitCode=0 Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.813489 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qrf8z" event={"ID":"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0","Type":"ContainerDied","Data":"5505e4bce0650de09e2405f0f03ab308e9653d7bf926e1de3c6fb6ff62d8259a"} Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.819763 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovn-controller-c89mf" podStartSLOduration=23.603174633 podStartE2EDuration="33.819711259s" podCreationTimestamp="2026-03-20 15:45:49 +0000 UTC" firstStartedPulling="2026-03-20 15:46:11.249341414 +0000 UTC m=+1270.943038244" lastFinishedPulling="2026-03-20 15:46:21.46587804 +0000 UTC m=+1281.159574870" observedRunningTime="2026-03-20 15:46:22.812742592 +0000 UTC m=+1282.506439442" watchObservedRunningTime="2026-03-20 15:46:22.819711259 +0000 UTC m=+1282.513408089" Mar 20 15:46:22 crc kubenswrapper[3552]: I0320 15:46:22.854013 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=5.182999222 podStartE2EDuration="38.853954255s" podCreationTimestamp="2026-03-20 15:45:44 +0000 UTC" firstStartedPulling="2026-03-20 15:45:47.584915826 +0000 UTC m=+1247.278612656" lastFinishedPulling="2026-03-20 15:46:21.255870859 +0000 UTC m=+1280.949567689" observedRunningTime="2026-03-20 15:46:22.847232555 +0000 UTC m=+1282.540929385" watchObservedRunningTime="2026-03-20 15:46:22.853954255 +0000 UTC m=+1282.547651085" Mar 20 15:46:23 crc kubenswrapper[3552]: I0320 15:46:23.406645 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Mar 20 15:46:23 crc kubenswrapper[3552]: I0320 15:46:23.407230 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Mar 20 15:46:23 crc kubenswrapper[3552]: I0320 15:46:23.825962 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.834109 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e4d74fff-0397-4b4c-ac20-6fd72086c84a","Type":"ContainerStarted","Data":"ea1fa79e37fcffdb331f06a317ce3527504b5f208c3b7388c64d480854f1259b"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.836583 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a03ab7e3-7def-4bd8-9ec5-93e9b3098b08","Type":"ContainerStarted","Data":"27fd4173dd46feaef74f885b0561b14c3309b8e63a2be5c13ab7018e3c55713b"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 
15:46:24.838333 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" event={"ID":"261f94f4-62eb-4777-9692-7e956509fe50","Type":"ContainerStarted","Data":"bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.840105 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qrf8z" event={"ID":"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0","Type":"ContainerStarted","Data":"995ab423cec1bb3b560c4fcceb5198e61aaa1b0a61cac671364d05db8b0d382a"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.842104 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" event={"ID":"706952c0-5b18-4aed-9ce1-d503bba5ba52","Type":"ContainerStarted","Data":"5b17712d7ec03f53a3d465ead2e18a221c90d07dfb43ba8a9efc89a8126433d3"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.843537 3552 generic.go:334] "Generic (PLEG): container finished" podID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerID="8b1aca7237e1fe93905bf1dba4286ca6f021a7ed41f1700eac9be0ffda92d03d" exitCode=0 Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.843643 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerDied","Data":"8b1aca7237e1fe93905bf1dba4286ca6f021a7ed41f1700eac9be0ffda92d03d"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.846168 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-ztxhh" event={"ID":"9d5405cc-3ba4-46fd-b566-be99e325e65c","Type":"ContainerStarted","Data":"6190bf115f1abbc1986becd11b955048ffafd59b5000f0a1b86372d8f007c67f"} Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.856531 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=20.982252245 podStartE2EDuration="34.856487006s" podCreationTimestamp="2026-03-20 15:45:50 +0000 UTC" firstStartedPulling="2026-03-20 15:46:10.177111024 +0000 UTC m=+1269.870807854" lastFinishedPulling="2026-03-20 15:46:24.051345785 +0000 UTC m=+1283.745042615" observedRunningTime="2026-03-20 15:46:24.853448085 +0000 UTC m=+1284.547144935" watchObservedRunningTime="2026-03-20 15:46:24.856487006 +0000 UTC m=+1284.550183836" Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.928817 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" podStartSLOduration=12.928779081 podStartE2EDuration="12.928779081s" podCreationTimestamp="2026-03-20 15:46:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:24.925863013 +0000 UTC m=+1284.619559863" watchObservedRunningTime="2026-03-20 15:46:24.928779081 +0000 UTC m=+1284.622475911" Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.961482 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.984616 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-ztxhh" podStartSLOduration=6.185483845 podStartE2EDuration="12.984574555s" podCreationTimestamp="2026-03-20 15:46:12 +0000 UTC" firstStartedPulling="2026-03-20 15:46:17.251626388 +0000 UTC m=+1276.945323218" lastFinishedPulling="2026-03-20 
15:46:24.050717108 +0000 UTC m=+1283.744413928" observedRunningTime="2026-03-20 15:46:24.982662994 +0000 UTC m=+1284.676359834" watchObservedRunningTime="2026-03-20 15:46:24.984574555 +0000 UTC m=+1284.678271385" Mar 20 15:46:24 crc kubenswrapper[3552]: I0320 15:46:24.985222 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=24.186366047 podStartE2EDuration="36.985196412s" podCreationTimestamp="2026-03-20 15:45:48 +0000 UTC" firstStartedPulling="2026-03-20 15:46:11.260297048 +0000 UTC m=+1270.953993878" lastFinishedPulling="2026-03-20 15:46:24.059127413 +0000 UTC m=+1283.752824243" observedRunningTime="2026-03-20 15:46:24.956128543 +0000 UTC m=+1284.649825373" watchObservedRunningTime="2026-03-20 15:46:24.985196412 +0000 UTC m=+1284.678893232" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.004629 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" podStartSLOduration=13.00457783 podStartE2EDuration="13.00457783s" podCreationTimestamp="2026-03-20 15:46:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:25.002277789 +0000 UTC m=+1284.695974629" watchObservedRunningTime="2026-03-20 15:46:25.00457783 +0000 UTC m=+1284.698274660" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.071350 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.354943 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.380719 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d98d94d89-w98z9"] Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.424049 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d58c49d99-lznpf"] Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.424209 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" podNamespace="openstack" podName="dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: E0320 15:46:25.424412 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="14bd525a-47c6-4d2e-96e5-ce83567aacb7" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.424422 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="14bd525a-47c6-4d2e-96e5-ce83567aacb7" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: E0320 15:46:25.424445 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7f7269f6-1d8b-488c-8061-7ab83faf1350" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.424451 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f7269f6-1d8b-488c-8061-7ab83faf1350" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: E0320 15:46:25.424463 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.424468 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.430047 3552 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="14bd525a-47c6-4d2e-96e5-ce83567aacb7" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.430079 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f12bd5b-3ff4-4fd8-a0a8-6fda28855da8" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.430087 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f7269f6-1d8b-488c-8061-7ab83faf1350" containerName="init" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.431090 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.440787 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-nb\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.440830 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-sb\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.440858 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-config\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.440944 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ktss\" (UniqueName: \"kubernetes.io/projected/8f102abe-1535-4233-ba10-e6ce2e4daa29-kube-api-access-8ktss\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.440991 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-dns-svc\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.494373 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d58c49d99-lznpf"] Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.543218 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-nb\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.543269 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-sb\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") 
" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.543318 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-config\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.543374 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8ktss\" (UniqueName: \"kubernetes.io/projected/8f102abe-1535-4233-ba10-e6ce2e4daa29-kube-api-access-8ktss\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.543419 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-dns-svc\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.544237 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-dns-svc\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.544872 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-sb\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.545534 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-nb\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.552605 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-config\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.582378 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ktss\" (UniqueName: \"kubernetes.io/projected/8f102abe-1535-4233-ba10-e6ce2e4daa29-kube-api-access-8ktss\") pod \"dnsmasq-dns-7d58c49d99-lznpf\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.832603 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.853499 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qrf8z" event={"ID":"af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0","Type":"ContainerStarted","Data":"09378d066ceefe5a5b24b9e3101f61ee1e3c2468136c8314e39c1dce76cf423c"} Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.854765 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.854812 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.855099 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:25 crc kubenswrapper[3552]: I0320 15:46:25.876579 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-qrf8z" podStartSLOduration=25.762760448 podStartE2EDuration="36.87653064s" podCreationTimestamp="2026-03-20 15:45:49 +0000 UTC" firstStartedPulling="2026-03-20 15:46:10.102839526 +0000 UTC m=+1269.796536356" lastFinishedPulling="2026-03-20 15:46:21.216609718 +0000 UTC m=+1280.910306548" observedRunningTime="2026-03-20 15:46:25.869792 +0000 UTC m=+1285.563488850" watchObservedRunningTime="2026-03-20 15:46:25.87653064 +0000 UTC m=+1285.570227470" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.354766 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.383279 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d58c49d99-lznpf"] Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.578374 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.578884 3552 topology_manager.go:215] "Topology Admit Handler" podUID="dd24d70f-864e-4803-8e8c-9d9e5aadfa84" podNamespace="openstack" podName="swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.584758 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: W0320 15:46:26.587365 3552 reflector.go:539] object-"openstack"/"swift-ring-files": failed to list *v1.ConfigMap: configmaps "swift-ring-files" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: E0320 15:46:26.587415 3552 reflector.go:147] object-"openstack"/"swift-ring-files": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "swift-ring-files" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: W0320 15:46:26.587453 3552 reflector.go:539] object-"openstack"/"swift-conf": failed to list *v1.Secret: secrets "swift-conf" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: E0320 15:46:26.587464 3552 reflector.go:147] object-"openstack"/"swift-conf": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "swift-conf" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: W0320 15:46:26.587930 3552 reflector.go:539] object-"openstack"/"swift-swift-dockercfg-bwz6c": failed to list *v1.Secret: secrets "swift-swift-dockercfg-bwz6c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: E0320 15:46:26.587950 3552 reflector.go:147] object-"openstack"/"swift-swift-dockercfg-bwz6c": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "swift-swift-dockercfg-bwz6c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: W0320 15:46:26.587977 3552 reflector.go:539] object-"openstack"/"swift-storage-config-data": failed to list *v1.ConfigMap: configmaps "swift-storage-config-data" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: E0320 15:46:26.587988 3552 reflector.go:147] object-"openstack"/"swift-storage-config-data": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "swift-storage-config-data" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.643160 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.686232 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.767722 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.767771 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.767804 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhzjf\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-kube-api-access-qhzjf\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.767845 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-lock\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.767884 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.767908 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-cache\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.861805 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" event={"ID":"8f102abe-1535-4233-ba10-e6ce2e4daa29","Type":"ContainerStarted","Data":"970f573e43c78ddf2b5f665667cfb9072cb18729d3dba783425406ad02525db2"} Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.863426 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.863532 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.862895 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" podUID="261f94f4-62eb-4777-9692-7e956509fe50" containerName="dnsmasq-dns" containerID="cri-o://bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051" gracePeriod=10 Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.869957 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-lock\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.870038 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.870075 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-cache\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.870146 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.870172 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.870203 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qhzjf\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-kube-api-access-qhzjf\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.870884 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-lock\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.872531 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-cache\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.873151 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.877390 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.889286 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhzjf\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-kube-api-access-qhzjf\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.917388 3552 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.953046 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Mar 20 15:46:26 crc kubenswrapper[3552]: I0320 15:46:26.987511 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.121897 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-q5q9g"] Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.122045 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7961927b-2515-44c1-b350-16985a6c6c73" podNamespace="openstack" podName="swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.123051 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.125719 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.125974 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.128551 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.136487 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-q5q9g"] Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.233084 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.233243 3552 topology_manager.go:215] "Topology Admit Handler" podUID="74241f0f-72bf-49b6-b849-11361c3b86e5" podNamespace="openstack" podName="ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.240230 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.247819 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.248118 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.248276 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-5pj2m" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.248521 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.248662 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.259904 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292353 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k254\" (UniqueName: \"kubernetes.io/projected/7961927b-2515-44c1-b350-16985a6c6c73-kube-api-access-5k254\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292721 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-scripts\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292775 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-dispersionconf\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292798 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-combined-ca-bundle\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292826 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7961927b-2515-44c1-b350-16985a6c6c73-etc-swift\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292858 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-ring-data-devices\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.292885 3552 
reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-swiftconf\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394273 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-scripts\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394344 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394383 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-dispersionconf\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394444 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-combined-ca-bundle\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394465 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4ttx\" (UniqueName: \"kubernetes.io/projected/74241f0f-72bf-49b6-b849-11361c3b86e5-kube-api-access-h4ttx\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394487 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7961927b-2515-44c1-b350-16985a6c6c73-etc-swift\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394513 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394530 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394556 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ring-data-devices\" 
(UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-ring-data-devices\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394595 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-swiftconf\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394619 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74241f0f-72bf-49b6-b849-11361c3b86e5-scripts\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394656 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74241f0f-72bf-49b6-b849-11361c3b86e5-config\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394707 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/74241f0f-72bf-49b6-b849-11361c3b86e5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.394732 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5k254\" (UniqueName: \"kubernetes.io/projected/7961927b-2515-44c1-b350-16985a6c6c73-kube-api-access-5k254\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.395986 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-ring-data-devices\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.396776 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-scripts\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.396790 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7961927b-2515-44c1-b350-16985a6c6c73-etc-swift\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.426076 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-combined-ca-bundle\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " 
pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.426081 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-dispersionconf\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.431082 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k254\" (UniqueName: \"kubernetes.io/projected/7961927b-2515-44c1-b350-16985a6c6c73-kube-api-access-5k254\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496365 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496562 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496636 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-h4ttx\" (UniqueName: \"kubernetes.io/projected/74241f0f-72bf-49b6-b849-11361c3b86e5-kube-api-access-h4ttx\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496675 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496695 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496743 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74241f0f-72bf-49b6-b849-11361c3b86e5-scripts\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496790 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74241f0f-72bf-49b6-b849-11361c3b86e5-config\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.496822 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/74241f0f-72bf-49b6-b849-11361c3b86e5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.498039 3552 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74241f0f-72bf-49b6-b849-11361c3b86e5-scripts\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.498783 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74241f0f-72bf-49b6-b849-11361c3b86e5-config\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.499035 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/74241f0f-72bf-49b6-b849-11361c3b86e5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.503567 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.505424 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.506746 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74241f0f-72bf-49b6-b849-11361c3b86e5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.536136 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4ttx\" (UniqueName: \"kubernetes.io/projected/74241f0f-72bf-49b6-b849-11361c3b86e5-kube-api-access-h4ttx\") pod \"ovn-northd-0\" (UID: \"74241f0f-72bf-49b6-b849-11361c3b86e5\") " pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.561260 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-bwz6c" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.562744 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.572470 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.598051 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gplx\" (UniqueName: \"kubernetes.io/projected/261f94f4-62eb-4777-9692-7e956509fe50-kube-api-access-4gplx\") pod \"261f94f4-62eb-4777-9692-7e956509fe50\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.598116 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-dns-svc\") pod \"261f94f4-62eb-4777-9692-7e956509fe50\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.598207 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-ovsdbserver-nb\") pod \"261f94f4-62eb-4777-9692-7e956509fe50\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.598368 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-config\") pod \"261f94f4-62eb-4777-9692-7e956509fe50\" (UID: \"261f94f4-62eb-4777-9692-7e956509fe50\") " Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.614708 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/261f94f4-62eb-4777-9692-7e956509fe50-kube-api-access-4gplx" (OuterVolumeSpecName: "kube-api-access-4gplx") pod "261f94f4-62eb-4777-9692-7e956509fe50" (UID: "261f94f4-62eb-4777-9692-7e956509fe50"). InnerVolumeSpecName "kube-api-access-4gplx". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.655713 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "261f94f4-62eb-4777-9692-7e956509fe50" (UID: "261f94f4-62eb-4777-9692-7e956509fe50"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.662702 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-config" (OuterVolumeSpecName: "config") pod "261f94f4-62eb-4777-9692-7e956509fe50" (UID: "261f94f4-62eb-4777-9692-7e956509fe50"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.677653 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.697154 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "261f94f4-62eb-4777-9692-7e956509fe50" (UID: "261f94f4-62eb-4777-9692-7e956509fe50"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.701939 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.701973 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.701988 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-4gplx\" (UniqueName: \"kubernetes.io/projected/261f94f4-62eb-4777-9692-7e956509fe50-kube-api-access-4gplx\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.701998 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/261f94f4-62eb-4777-9692-7e956509fe50-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.732381 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.739837 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-swiftconf\") pod \"swift-ring-rebalance-q5q9g\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.747745 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.859681 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Mar 20 15:46:27 crc kubenswrapper[3552]: E0320 15:46:27.865074 3552 projected.go:294] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Mar 20 15:46:27 crc kubenswrapper[3552]: E0320 15:46:27.865094 3552 projected.go:200] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Mar 20 15:46:27 crc kubenswrapper[3552]: E0320 15:46:27.865172 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift podName:dd24d70f-864e-4803-8e8c-9d9e5aadfa84 nodeName:}" failed. No retries permitted until 2026-03-20 15:46:28.365155309 +0000 UTC m=+1288.058852139 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift") pod "swift-storage-0" (UID: "dd24d70f-864e-4803-8e8c-9d9e5aadfa84") : configmap "swift-ring-files" not found Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.872612 3552 generic.go:334] "Generic (PLEG): container finished" podID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerID="b794f0b5d166ba4f465b9a326003f8e3d1ff9f6c1a8e7d2cfc504c8a39f60274" exitCode=0 Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.872809 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" event={"ID":"8f102abe-1535-4233-ba10-e6ce2e4daa29","Type":"ContainerDied","Data":"b794f0b5d166ba4f465b9a326003f8e3d1ff9f6c1a8e7d2cfc504c8a39f60274"} Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.881098 3552 generic.go:334] "Generic (PLEG): container finished" podID="261f94f4-62eb-4777-9692-7e956509fe50" containerID="bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051" exitCode=0 Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.881293 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.881316 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" event={"ID":"261f94f4-62eb-4777-9692-7e956509fe50","Type":"ContainerDied","Data":"bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051"} Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.896595 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d98d94d89-w98z9" event={"ID":"261f94f4-62eb-4777-9692-7e956509fe50","Type":"ContainerDied","Data":"94e59dc21c4b86179dd3cfea0213d3954b24324bfb5219ffeff145de3a2db7d4"} Mar 20 15:46:27 crc kubenswrapper[3552]: I0320 15:46:27.896653 3552 scope.go:117] "RemoveContainer" containerID="bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.017545 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d98d94d89-w98z9"] Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.045253 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d98d94d89-w98z9"] Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.074673 3552 scope.go:117] "RemoveContainer" containerID="74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.077677 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Mar 20 15:46:28 crc kubenswrapper[3552]: W0320 15:46:28.086684 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74241f0f_72bf_49b6_b849_11361c3b86e5.slice/crio-3c6980a8458dde7ecdd9db1fa2b4e20f51bc5aea53fc4915d96c35040993de60 WatchSource:0}: Error finding container 3c6980a8458dde7ecdd9db1fa2b4e20f51bc5aea53fc4915d96c35040993de60: Status 404 returned error can't find the container with id 3c6980a8458dde7ecdd9db1fa2b4e20f51bc5aea53fc4915d96c35040993de60 Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.143216 3552 scope.go:117] "RemoveContainer" containerID="bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051" Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.144126 3552 remote_runtime.go:432] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051\": container with ID starting with bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051 not found: ID does not exist" containerID="bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.144180 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051"} err="failed to get container status \"bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051\": rpc error: code = NotFound desc = could not find container \"bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051\": container with ID starting with bf8b2bf968baa5fa941ebedccb1d1dae54447ee8ebe943a6dd17c330efb58051 not found: ID does not exist" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.144195 3552 scope.go:117] "RemoveContainer" containerID="74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b" Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.147475 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b\": container with ID starting with 74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b not found: ID does not exist" containerID="74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.147521 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b"} err="failed to get container status \"74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b\": rpc error: code = NotFound desc = could not find container \"74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b\": container with ID starting with 74c46e4970fefdd3152277384303520e95e73110c275fdee8a1ce80fd4774d8b not found: ID does not exist" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.221858 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-q5q9g"] Mar 20 15:46:28 crc kubenswrapper[3552]: W0320 15:46:28.227363 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7961927b_2515_44c1_b350_16985a6c6c73.slice/crio-2e0397bcfa163cb011d859f144ac024e731b4aae59afbda7abf2828147bb17f1 WatchSource:0}: Error finding container 2e0397bcfa163cb011d859f144ac024e731b4aae59afbda7abf2828147bb17f1: Status 404 returned error can't find the container with id 2e0397bcfa163cb011d859f144ac024e731b4aae59afbda7abf2828147bb17f1 Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.434558 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.434729 3552 projected.go:294] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.434996 3552 projected.go:200] Error preparing data for 
projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.435049 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift podName:dd24d70f-864e-4803-8e8c-9d9e5aadfa84 nodeName:}" failed. No retries permitted until 2026-03-20 15:46:29.435033533 +0000 UTC m=+1289.128730363 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift") pod "swift-storage-0" (UID: "dd24d70f-864e-4803-8e8c-9d9e5aadfa84") : configmap "swift-ring-files" not found Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.836865 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-6cf8-account-create-update-n8mhq"] Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.837004 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8dc74a35-3b06-4f41-9684-f35bbdd1078b" podNamespace="openstack" podName="glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.837183 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="261f94f4-62eb-4777-9692-7e956509fe50" containerName="dnsmasq-dns" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.837197 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="261f94f4-62eb-4777-9692-7e956509fe50" containerName="dnsmasq-dns" Mar 20 15:46:28 crc kubenswrapper[3552]: E0320 15:46:28.837227 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="261f94f4-62eb-4777-9692-7e956509fe50" containerName="init" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.837235 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="261f94f4-62eb-4777-9692-7e956509fe50" containerName="init" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.837381 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="261f94f4-62eb-4777-9692-7e956509fe50" containerName="dnsmasq-dns" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.839281 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.841963 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.851339 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6cf8-account-create-update-n8mhq"] Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.878993 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-4t82z"] Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.879195 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3121cfff-17ad-42d7-b737-f68b3de66a9f" podNamespace="openstack" podName="glance-db-create-4t82z" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.880344 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-4t82z" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.888823 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-4t82z"] Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.901862 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-q5q9g" event={"ID":"7961927b-2515-44c1-b350-16985a6c6c73","Type":"ContainerStarted","Data":"2e0397bcfa163cb011d859f144ac024e731b4aae59afbda7abf2828147bb17f1"} Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.906928 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"74241f0f-72bf-49b6-b849-11361c3b86e5","Type":"ContainerStarted","Data":"3c6980a8458dde7ecdd9db1fa2b4e20f51bc5aea53fc4915d96c35040993de60"} Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.908630 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" event={"ID":"8f102abe-1535-4233-ba10-e6ce2e4daa29","Type":"ContainerStarted","Data":"c8979f267bfa243f15c7fa737169c6c8237493db03fa21d44455302f3ce8b80e"} Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.936163 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" podStartSLOduration=3.936106046 podStartE2EDuration="3.936106046s" podCreationTimestamp="2026-03-20 15:46:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:28.92914972 +0000 UTC m=+1288.622846550" watchObservedRunningTime="2026-03-20 15:46:28.936106046 +0000 UTC m=+1288.629802876" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.942379 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3121cfff-17ad-42d7-b737-f68b3de66a9f-operator-scripts\") pod \"glance-db-create-4t82z\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " pod="openstack/glance-db-create-4t82z" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.942477 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jlhf\" (UniqueName: \"kubernetes.io/projected/8dc74a35-3b06-4f41-9684-f35bbdd1078b-kube-api-access-8jlhf\") pod \"glance-6cf8-account-create-update-n8mhq\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.942872 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5grm\" (UniqueName: \"kubernetes.io/projected/3121cfff-17ad-42d7-b737-f68b3de66a9f-kube-api-access-j5grm\") pod \"glance-db-create-4t82z\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " pod="openstack/glance-db-create-4t82z" Mar 20 15:46:28 crc kubenswrapper[3552]: I0320 15:46:28.942913 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8dc74a35-3b06-4f41-9684-f35bbdd1078b-operator-scripts\") pod \"glance-6cf8-account-create-update-n8mhq\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.044452 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"kube-api-access-8jlhf\" (UniqueName: \"kubernetes.io/projected/8dc74a35-3b06-4f41-9684-f35bbdd1078b-kube-api-access-8jlhf\") pod \"glance-6cf8-account-create-update-n8mhq\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.044617 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j5grm\" (UniqueName: \"kubernetes.io/projected/3121cfff-17ad-42d7-b737-f68b3de66a9f-kube-api-access-j5grm\") pod \"glance-db-create-4t82z\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " pod="openstack/glance-db-create-4t82z" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.044652 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8dc74a35-3b06-4f41-9684-f35bbdd1078b-operator-scripts\") pod \"glance-6cf8-account-create-update-n8mhq\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.044980 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3121cfff-17ad-42d7-b737-f68b3de66a9f-operator-scripts\") pod \"glance-db-create-4t82z\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " pod="openstack/glance-db-create-4t82z" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.045767 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3121cfff-17ad-42d7-b737-f68b3de66a9f-operator-scripts\") pod \"glance-db-create-4t82z\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " pod="openstack/glance-db-create-4t82z" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.046245 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8dc74a35-3b06-4f41-9684-f35bbdd1078b-operator-scripts\") pod \"glance-6cf8-account-create-update-n8mhq\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.063679 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jlhf\" (UniqueName: \"kubernetes.io/projected/8dc74a35-3b06-4f41-9684-f35bbdd1078b-kube-api-access-8jlhf\") pod \"glance-6cf8-account-create-update-n8mhq\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.064103 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5grm\" (UniqueName: \"kubernetes.io/projected/3121cfff-17ad-42d7-b737-f68b3de66a9f-kube-api-access-j5grm\") pod \"glance-db-create-4t82z\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " pod="openstack/glance-db-create-4t82z" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.152645 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.207096 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-4t82z" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.439375 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="261f94f4-62eb-4777-9692-7e956509fe50" path="/var/lib/kubelet/pods/261f94f4-62eb-4777-9692-7e956509fe50/volumes" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.451519 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:29 crc kubenswrapper[3552]: E0320 15:46:29.451729 3552 projected.go:294] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Mar 20 15:46:29 crc kubenswrapper[3552]: E0320 15:46:29.451740 3552 projected.go:200] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Mar 20 15:46:29 crc kubenswrapper[3552]: E0320 15:46:29.451782 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift podName:dd24d70f-864e-4803-8e8c-9d9e5aadfa84 nodeName:}" failed. No retries permitted until 2026-03-20 15:46:31.451768649 +0000 UTC m=+1291.145465479 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift") pod "swift-storage-0" (UID: "dd24d70f-864e-4803-8e8c-9d9e5aadfa84") : configmap "swift-ring-files" not found Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.601061 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.697102 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6cf8-account-create-update-n8mhq"] Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.760941 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.925324 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6cf8-account-create-update-n8mhq" event={"ID":"8dc74a35-3b06-4f41-9684-f35bbdd1078b","Type":"ContainerStarted","Data":"4fb0154c63dea6091d6b92e452d33d9565c47a345566ad1423ccd189ab7ded87"} Mar 20 15:46:29 crc kubenswrapper[3552]: I0320 15:46:29.925792 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.034115 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-4t82z"] Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.657020 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-m87nb"] Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.657189 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e628a45b-693e-4699-b6f8-88a01dfd6ae7" podNamespace="openstack" podName="root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.658026 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.660343 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.666915 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-m87nb"] Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.782257 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e628a45b-693e-4699-b6f8-88a01dfd6ae7-operator-scripts\") pod \"root-account-create-update-m87nb\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.782573 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnnmh\" (UniqueName: \"kubernetes.io/projected/e628a45b-693e-4699-b6f8-88a01dfd6ae7-kube-api-access-fnnmh\") pod \"root-account-create-update-m87nb\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.884365 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e628a45b-693e-4699-b6f8-88a01dfd6ae7-operator-scripts\") pod \"root-account-create-update-m87nb\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.884463 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fnnmh\" (UniqueName: \"kubernetes.io/projected/e628a45b-693e-4699-b6f8-88a01dfd6ae7-kube-api-access-fnnmh\") pod \"root-account-create-update-m87nb\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.885197 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e628a45b-693e-4699-b6f8-88a01dfd6ae7-operator-scripts\") pod \"root-account-create-update-m87nb\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.913281 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnnmh\" (UniqueName: \"kubernetes.io/projected/e628a45b-693e-4699-b6f8-88a01dfd6ae7-kube-api-access-fnnmh\") pod \"root-account-create-update-m87nb\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.932638 3552 generic.go:334] "Generic (PLEG): container finished" podID="3121cfff-17ad-42d7-b737-f68b3de66a9f" containerID="167b9625a6956f61ebbc3f161ae18e1c62999da2959da536f4d12cf7af6fc059" exitCode=0 Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.932752 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-4t82z" event={"ID":"3121cfff-17ad-42d7-b737-f68b3de66a9f","Type":"ContainerDied","Data":"167b9625a6956f61ebbc3f161ae18e1c62999da2959da536f4d12cf7af6fc059"} Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.932802 3552 kubelet.go:2461] "SyncLoop (PLEG): event for 
pod" pod="openstack/glance-db-create-4t82z" event={"ID":"3121cfff-17ad-42d7-b737-f68b3de66a9f","Type":"ContainerStarted","Data":"9d6bb08e3ec8f5df1b4366d5982513071ac32d34850d43e0490f1b82dfbf4db2"} Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.934488 3552 generic.go:334] "Generic (PLEG): container finished" podID="8dc74a35-3b06-4f41-9684-f35bbdd1078b" containerID="bb0582e5adc02085eb8cf47852e5e9da42051e43f99309362e55cb9d24f7a99c" exitCode=0 Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.934531 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6cf8-account-create-update-n8mhq" event={"ID":"8dc74a35-3b06-4f41-9684-f35bbdd1078b","Type":"ContainerDied","Data":"bb0582e5adc02085eb8cf47852e5e9da42051e43f99309362e55cb9d24f7a99c"} Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.936308 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"74241f0f-72bf-49b6-b849-11361c3b86e5","Type":"ContainerStarted","Data":"c88f84ad8d3c461fd74a3530795375d768a5ef8a70905dde2e67d6aa6d72222b"} Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.936338 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"74241f0f-72bf-49b6-b849-11361c3b86e5","Type":"ContainerStarted","Data":"da7fdfaae23813cf3398591cf900ff10e5ad68fb93d7681da48d9ab2c69c575f"} Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.970671 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.428599857 podStartE2EDuration="3.970616203s" podCreationTimestamp="2026-03-20 15:46:27 +0000 UTC" firstStartedPulling="2026-03-20 15:46:28.104180607 +0000 UTC m=+1287.797877437" lastFinishedPulling="2026-03-20 15:46:29.646196953 +0000 UTC m=+1289.339893783" observedRunningTime="2026-03-20 15:46:30.965803824 +0000 UTC m=+1290.659500654" watchObservedRunningTime="2026-03-20 15:46:30.970616203 +0000 UTC m=+1290.664313063" Mar 20 15:46:30 crc kubenswrapper[3552]: I0320 15:46:30.987760 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:31 crc kubenswrapper[3552]: I0320 15:46:31.494631 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:31 crc kubenswrapper[3552]: E0320 15:46:31.494782 3552 projected.go:294] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Mar 20 15:46:31 crc kubenswrapper[3552]: E0320 15:46:31.494916 3552 projected.go:200] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Mar 20 15:46:31 crc kubenswrapper[3552]: E0320 15:46:31.494955 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift podName:dd24d70f-864e-4803-8e8c-9d9e5aadfa84 nodeName:}" failed. No retries permitted until 2026-03-20 15:46:35.494941338 +0000 UTC m=+1295.188638168 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift") pod "swift-storage-0" (UID: "dd24d70f-864e-4803-8e8c-9d9e5aadfa84") : configmap "swift-ring-files" not found Mar 20 15:46:31 crc kubenswrapper[3552]: I0320 15:46:31.496795 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-m87nb"] Mar 20 15:46:31 crc kubenswrapper[3552]: W0320 15:46:31.510352 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode628a45b_693e_4699_b6f8_88a01dfd6ae7.slice/crio-98af9b225caab92b39771069ab5afb374b124dd6d23e0ff8496681ddceb04a45 WatchSource:0}: Error finding container 98af9b225caab92b39771069ab5afb374b124dd6d23e0ff8496681ddceb04a45: Status 404 returned error can't find the container with id 98af9b225caab92b39771069ab5afb374b124dd6d23e0ff8496681ddceb04a45 Mar 20 15:46:31 crc kubenswrapper[3552]: I0320 15:46:31.959089 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m87nb" event={"ID":"e628a45b-693e-4699-b6f8-88a01dfd6ae7","Type":"ContainerStarted","Data":"b19470232bec3ad152455755485915b5992d607b8fb9c8f08865b93bf743c298"} Mar 20 15:46:31 crc kubenswrapper[3552]: I0320 15:46:31.959130 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m87nb" event={"ID":"e628a45b-693e-4699-b6f8-88a01dfd6ae7","Type":"ContainerStarted","Data":"98af9b225caab92b39771069ab5afb374b124dd6d23e0ff8496681ddceb04a45"} Mar 20 15:46:31 crc kubenswrapper[3552]: I0320 15:46:31.959320 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.542639 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-4t82z" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.579122 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.613986 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3121cfff-17ad-42d7-b737-f68b3de66a9f-operator-scripts\") pod \"3121cfff-17ad-42d7-b737-f68b3de66a9f\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.614334 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5grm\" (UniqueName: \"kubernetes.io/projected/3121cfff-17ad-42d7-b737-f68b3de66a9f-kube-api-access-j5grm\") pod \"3121cfff-17ad-42d7-b737-f68b3de66a9f\" (UID: \"3121cfff-17ad-42d7-b737-f68b3de66a9f\") " Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.614431 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8dc74a35-3b06-4f41-9684-f35bbdd1078b-operator-scripts\") pod \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.614490 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jlhf\" (UniqueName: \"kubernetes.io/projected/8dc74a35-3b06-4f41-9684-f35bbdd1078b-kube-api-access-8jlhf\") pod \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\" (UID: \"8dc74a35-3b06-4f41-9684-f35bbdd1078b\") " Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.616625 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dc74a35-3b06-4f41-9684-f35bbdd1078b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8dc74a35-3b06-4f41-9684-f35bbdd1078b" (UID: "8dc74a35-3b06-4f41-9684-f35bbdd1078b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.616812 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3121cfff-17ad-42d7-b737-f68b3de66a9f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3121cfff-17ad-42d7-b737-f68b3de66a9f" (UID: "3121cfff-17ad-42d7-b737-f68b3de66a9f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.621176 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dc74a35-3b06-4f41-9684-f35bbdd1078b-kube-api-access-8jlhf" (OuterVolumeSpecName: "kube-api-access-8jlhf") pod "8dc74a35-3b06-4f41-9684-f35bbdd1078b" (UID: "8dc74a35-3b06-4f41-9684-f35bbdd1078b"). InnerVolumeSpecName "kube-api-access-8jlhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.621475 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3121cfff-17ad-42d7-b737-f68b3de66a9f-kube-api-access-j5grm" (OuterVolumeSpecName: "kube-api-access-j5grm") pod "3121cfff-17ad-42d7-b737-f68b3de66a9f" (UID: "3121cfff-17ad-42d7-b737-f68b3de66a9f"). InnerVolumeSpecName "kube-api-access-j5grm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.716475 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8dc74a35-3b06-4f41-9684-f35bbdd1078b-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.716523 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-8jlhf\" (UniqueName: \"kubernetes.io/projected/8dc74a35-3b06-4f41-9684-f35bbdd1078b-kube-api-access-8jlhf\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.716537 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3121cfff-17ad-42d7-b737-f68b3de66a9f-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.716553 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-j5grm\" (UniqueName: \"kubernetes.io/projected/3121cfff-17ad-42d7-b737-f68b3de66a9f-kube-api-access-j5grm\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.978116 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6cf8-account-create-update-n8mhq" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.979100 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6cf8-account-create-update-n8mhq" event={"ID":"8dc74a35-3b06-4f41-9684-f35bbdd1078b","Type":"ContainerDied","Data":"4fb0154c63dea6091d6b92e452d33d9565c47a345566ad1423ccd189ab7ded87"} Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.979223 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fb0154c63dea6091d6b92e452d33d9565c47a345566ad1423ccd189ab7ded87" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.991627 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-4t82z" Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.992246 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-4t82z" event={"ID":"3121cfff-17ad-42d7-b737-f68b3de66a9f","Type":"ContainerDied","Data":"9d6bb08e3ec8f5df1b4366d5982513071ac32d34850d43e0490f1b82dfbf4db2"} Mar 20 15:46:32 crc kubenswrapper[3552]: I0320 15:46:32.992287 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d6bb08e3ec8f5df1b4366d5982513071ac32d34850d43e0490f1b82dfbf4db2" Mar 20 15:46:33 crc kubenswrapper[3552]: I0320 15:46:33.019566 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/root-account-create-update-m87nb" podStartSLOduration=3.019526437 podStartE2EDuration="3.019526437s" podCreationTimestamp="2026-03-20 15:46:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:33.015360626 +0000 UTC m=+1292.709057456" watchObservedRunningTime="2026-03-20 15:46:33.019526437 +0000 UTC m=+1292.713223267" Mar 20 15:46:33 crc kubenswrapper[3552]: I0320 15:46:33.096795 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.005133 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-4ntxp"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.005574 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e04aaab3-651f-4523-ab35-250a33f54f4d" podNamespace="openstack" podName="glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: E0320 15:46:34.005789 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8dc74a35-3b06-4f41-9684-f35bbdd1078b" containerName="mariadb-account-create-update" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.005803 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dc74a35-3b06-4f41-9684-f35bbdd1078b" containerName="mariadb-account-create-update" Mar 20 15:46:34 crc kubenswrapper[3552]: E0320 15:46:34.005827 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3121cfff-17ad-42d7-b737-f68b3de66a9f" containerName="mariadb-database-create" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.005833 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3121cfff-17ad-42d7-b737-f68b3de66a9f" containerName="mariadb-database-create" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.005978 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3121cfff-17ad-42d7-b737-f68b3de66a9f" containerName="mariadb-database-create" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.005993 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dc74a35-3b06-4f41-9684-f35bbdd1078b" containerName="mariadb-account-create-update" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.006554 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.010689 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.011392 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rwgjn" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.018164 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-4ntxp"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.039453 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-db-sync-config-data\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.039504 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-config-data\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.039583 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g95wd\" (UniqueName: \"kubernetes.io/projected/e04aaab3-651f-4523-ab35-250a33f54f4d-kube-api-access-g95wd\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.039623 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-combined-ca-bundle\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.142202 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-db-sync-config-data\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.142252 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-config-data\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.142322 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g95wd\" (UniqueName: \"kubernetes.io/projected/e04aaab3-651f-4523-ab35-250a33f54f4d-kube-api-access-g95wd\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.142352 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-combined-ca-bundle\") pod 
\"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.147752 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-db-sync-config-data\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.149751 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-combined-ca-bundle\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.150542 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-config-data\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.164987 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-g95wd\" (UniqueName: \"kubernetes.io/projected/e04aaab3-651f-4523-ab35-250a33f54f4d-kube-api-access-g95wd\") pod \"glance-db-sync-4ntxp\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.375131 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-84nn5"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.375274 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c56721e1-6189-4d50-bb51-cdf6a0f163a8" podNamespace="openstack" podName="keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.376108 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.388519 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-84nn5"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.388973 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-4ntxp" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.449426 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrmsg\" (UniqueName: \"kubernetes.io/projected/c56721e1-6189-4d50-bb51-cdf6a0f163a8-kube-api-access-jrmsg\") pod \"keystone-db-create-84nn5\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.449762 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56721e1-6189-4d50-bb51-cdf6a0f163a8-operator-scripts\") pod \"keystone-db-create-84nn5\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.478741 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-2002-account-create-update-mnjhb"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.478888 3552 topology_manager.go:215] "Topology Admit Handler" podUID="55543b44-cb0a-4ffe-997e-d94d126eb91b" podNamespace="openstack" podName="keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.479857 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.498892 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.506770 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-2002-account-create-update-mnjhb"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.551656 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55543b44-cb0a-4ffe-997e-d94d126eb91b-operator-scripts\") pod \"keystone-2002-account-create-update-mnjhb\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.551721 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5vh7\" (UniqueName: \"kubernetes.io/projected/55543b44-cb0a-4ffe-997e-d94d126eb91b-kube-api-access-l5vh7\") pod \"keystone-2002-account-create-update-mnjhb\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.551828 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-jrmsg\" (UniqueName: \"kubernetes.io/projected/c56721e1-6189-4d50-bb51-cdf6a0f163a8-kube-api-access-jrmsg\") pod \"keystone-db-create-84nn5\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.551860 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56721e1-6189-4d50-bb51-cdf6a0f163a8-operator-scripts\") pod \"keystone-db-create-84nn5\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 
15:46:34.552601 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56721e1-6189-4d50-bb51-cdf6a0f163a8-operator-scripts\") pod \"keystone-db-create-84nn5\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.560989 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-zxrpw"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.561209 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" podNamespace="openstack" podName="placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.573866 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.585632 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrmsg\" (UniqueName: \"kubernetes.io/projected/c56721e1-6189-4d50-bb51-cdf6a0f163a8-kube-api-access-jrmsg\") pod \"keystone-db-create-84nn5\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.592456 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zxrpw"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.603484 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/placement-449e-account-create-update-r8fxj"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.603632 3552 topology_manager.go:215] "Topology Admit Handler" podUID="aa27c1f0-656c-4972-aeb9-8279bd8cc6da" podNamespace="openstack" podName="placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.604476 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.609205 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.613130 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-449e-account-create-update-r8fxj"] Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.655675 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bswxj\" (UniqueName: \"kubernetes.io/projected/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-kube-api-access-bswxj\") pod \"placement-db-create-zxrpw\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.655735 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-operator-scripts\") pod \"placement-449e-account-create-update-r8fxj\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.655773 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55543b44-cb0a-4ffe-997e-d94d126eb91b-operator-scripts\") pod \"keystone-2002-account-create-update-mnjhb\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.655803 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l5vh7\" (UniqueName: \"kubernetes.io/projected/55543b44-cb0a-4ffe-997e-d94d126eb91b-kube-api-access-l5vh7\") pod \"keystone-2002-account-create-update-mnjhb\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.655852 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hchxt\" (UniqueName: \"kubernetes.io/projected/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-kube-api-access-hchxt\") pod \"placement-449e-account-create-update-r8fxj\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.655916 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-operator-scripts\") pod \"placement-db-create-zxrpw\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.658093 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55543b44-cb0a-4ffe-997e-d94d126eb91b-operator-scripts\") pod \"keystone-2002-account-create-update-mnjhb\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.685649 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-l5vh7\" (UniqueName: \"kubernetes.io/projected/55543b44-cb0a-4ffe-997e-d94d126eb91b-kube-api-access-l5vh7\") pod \"keystone-2002-account-create-update-mnjhb\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.740256 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.758141 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bswxj\" (UniqueName: \"kubernetes.io/projected/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-kube-api-access-bswxj\") pod \"placement-db-create-zxrpw\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.758187 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-operator-scripts\") pod \"placement-449e-account-create-update-r8fxj\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.758244 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hchxt\" (UniqueName: \"kubernetes.io/projected/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-kube-api-access-hchxt\") pod \"placement-449e-account-create-update-r8fxj\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.758301 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-operator-scripts\") pod \"placement-db-create-zxrpw\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.759031 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-operator-scripts\") pod \"placement-db-create-zxrpw\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.759916 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-operator-scripts\") pod \"placement-449e-account-create-update-r8fxj\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.781026 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bswxj\" (UniqueName: \"kubernetes.io/projected/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-kube-api-access-bswxj\") pod \"placement-db-create-zxrpw\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.782282 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hchxt\" (UniqueName: \"kubernetes.io/projected/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-kube-api-access-hchxt\") pod 
\"placement-449e-account-create-update-r8fxj\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.834331 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.956364 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:34 crc kubenswrapper[3552]: I0320 15:46:34.997509 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.011365 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-ll6vw"] Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.011605 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3e0924e3-d91e-44d3-a060-0e400a85cfc9" podNamespace="openstack" podName="watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.012720 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.017691 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-ll6vw"] Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.064942 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e0924e3-d91e-44d3-a060-0e400a85cfc9-operator-scripts\") pod \"watcher-db-create-ll6vw\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.065070 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9d7gm\" (UniqueName: \"kubernetes.io/projected/3e0924e3-d91e-44d3-a060-0e400a85cfc9-kube-api-access-9d7gm\") pod \"watcher-db-create-ll6vw\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.117695 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-c3b1-account-create-update-2ngjl"] Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.117849 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6d13a1a3-677a-43ea-bb63-305d67463ec7" podNamespace="openstack" podName="watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.118716 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.122444 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.143471 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-c3b1-account-create-update-2ngjl"] Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.166489 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9d7gm\" (UniqueName: \"kubernetes.io/projected/3e0924e3-d91e-44d3-a060-0e400a85cfc9-kube-api-access-9d7gm\") pod \"watcher-db-create-ll6vw\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.166583 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hngww\" (UniqueName: \"kubernetes.io/projected/6d13a1a3-677a-43ea-bb63-305d67463ec7-kube-api-access-hngww\") pod \"watcher-c3b1-account-create-update-2ngjl\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.166614 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e0924e3-d91e-44d3-a060-0e400a85cfc9-operator-scripts\") pod \"watcher-db-create-ll6vw\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.166646 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d13a1a3-677a-43ea-bb63-305d67463ec7-operator-scripts\") pod \"watcher-c3b1-account-create-update-2ngjl\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.167678 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e0924e3-d91e-44d3-a060-0e400a85cfc9-operator-scripts\") pod \"watcher-db-create-ll6vw\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.194230 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9d7gm\" (UniqueName: \"kubernetes.io/projected/3e0924e3-d91e-44d3-a060-0e400a85cfc9-kube-api-access-9d7gm\") pod \"watcher-db-create-ll6vw\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.268019 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d13a1a3-677a-43ea-bb63-305d67463ec7-operator-scripts\") pod \"watcher-c3b1-account-create-update-2ngjl\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.268157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hngww\" (UniqueName: \"kubernetes.io/projected/6d13a1a3-677a-43ea-bb63-305d67463ec7-kube-api-access-hngww\") pod 
\"watcher-c3b1-account-create-update-2ngjl\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.268690 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.268685 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d13a1a3-677a-43ea-bb63-305d67463ec7-operator-scripts\") pod \"watcher-c3b1-account-create-update-2ngjl\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.301004 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hngww\" (UniqueName: \"kubernetes.io/projected/6d13a1a3-677a-43ea-bb63-305d67463ec7-kube-api-access-hngww\") pod \"watcher-c3b1-account-create-update-2ngjl\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.342438 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.494706 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.586040 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:35 crc kubenswrapper[3552]: E0320 15:46:35.586198 3552 projected.go:294] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Mar 20 15:46:35 crc kubenswrapper[3552]: E0320 15:46:35.586212 3552 projected.go:200] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Mar 20 15:46:35 crc kubenswrapper[3552]: E0320 15:46:35.586265 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift podName:dd24d70f-864e-4803-8e8c-9d9e5aadfa84 nodeName:}" failed. No retries permitted until 2026-03-20 15:46:43.586251231 +0000 UTC m=+1303.279948051 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift") pod "swift-storage-0" (UID: "dd24d70f-864e-4803-8e8c-9d9e5aadfa84") : configmap "swift-ring-files" not found Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.834592 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.914571 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74cfff8f4c-rcmqm"] Mar 20 15:46:35 crc kubenswrapper[3552]: I0320 15:46:35.914794 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="dnsmasq-dns" containerID="cri-o://5b17712d7ec03f53a3d465ead2e18a221c90d07dfb43ba8a9efc89a8126433d3" gracePeriod=10 Mar 20 15:46:36 crc kubenswrapper[3552]: I0320 15:46:36.027667 3552 generic.go:334] "Generic (PLEG): container finished" podID="e628a45b-693e-4699-b6f8-88a01dfd6ae7" containerID="b19470232bec3ad152455755485915b5992d607b8fb9c8f08865b93bf743c298" exitCode=0 Mar 20 15:46:36 crc kubenswrapper[3552]: I0320 15:46:36.027714 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m87nb" event={"ID":"e628a45b-693e-4699-b6f8-88a01dfd6ae7","Type":"ContainerDied","Data":"b19470232bec3ad152455755485915b5992d607b8fb9c8f08865b93bf743c298"} Mar 20 15:46:37 crc kubenswrapper[3552]: I0320 15:46:37.046263 3552 generic.go:334] "Generic (PLEG): container finished" podID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerID="5b17712d7ec03f53a3d465ead2e18a221c90d07dfb43ba8a9efc89a8126433d3" exitCode=0 Mar 20 15:46:37 crc kubenswrapper[3552]: I0320 15:46:37.046383 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" event={"ID":"706952c0-5b18-4aed-9ce1-d503bba5ba52","Type":"ContainerDied","Data":"5b17712d7ec03f53a3d465ead2e18a221c90d07dfb43ba8a9efc89a8126433d3"} Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.096285 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.664145 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.740181 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnnmh\" (UniqueName: \"kubernetes.io/projected/e628a45b-693e-4699-b6f8-88a01dfd6ae7-kube-api-access-fnnmh\") pod \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.740396 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e628a45b-693e-4699-b6f8-88a01dfd6ae7-operator-scripts\") pod \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\" (UID: \"e628a45b-693e-4699-b6f8-88a01dfd6ae7\") " Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.741744 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e628a45b-693e-4699-b6f8-88a01dfd6ae7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e628a45b-693e-4699-b6f8-88a01dfd6ae7" (UID: "e628a45b-693e-4699-b6f8-88a01dfd6ae7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.762701 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e628a45b-693e-4699-b6f8-88a01dfd6ae7-kube-api-access-fnnmh" (OuterVolumeSpecName: "kube-api-access-fnnmh") pod "e628a45b-693e-4699-b6f8-88a01dfd6ae7" (UID: "e628a45b-693e-4699-b6f8-88a01dfd6ae7"). InnerVolumeSpecName "kube-api-access-fnnmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.842268 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-fnnmh\" (UniqueName: \"kubernetes.io/projected/e628a45b-693e-4699-b6f8-88a01dfd6ae7-kube-api-access-fnnmh\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:38 crc kubenswrapper[3552]: I0320 15:46:38.842314 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e628a45b-693e-4699-b6f8-88a01dfd6ae7-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:39 crc kubenswrapper[3552]: I0320 15:46:39.061319 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m87nb" event={"ID":"e628a45b-693e-4699-b6f8-88a01dfd6ae7","Type":"ContainerDied","Data":"98af9b225caab92b39771069ab5afb374b124dd6d23e0ff8496681ddceb04a45"} Mar 20 15:46:39 crc kubenswrapper[3552]: I0320 15:46:39.061352 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98af9b225caab92b39771069ab5afb374b124dd6d23e0ff8496681ddceb04a45" Mar 20 15:46:39 crc kubenswrapper[3552]: I0320 15:46:39.061431 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-m87nb" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.361147 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.491263 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-nb\") pod \"706952c0-5b18-4aed-9ce1-d503bba5ba52\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.491384 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-config\") pod \"706952c0-5b18-4aed-9ce1-d503bba5ba52\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.491437 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-dns-svc\") pod \"706952c0-5b18-4aed-9ce1-d503bba5ba52\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.491482 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-sb\") pod \"706952c0-5b18-4aed-9ce1-d503bba5ba52\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.491511 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxxz5\" (UniqueName: \"kubernetes.io/projected/706952c0-5b18-4aed-9ce1-d503bba5ba52-kube-api-access-nxxz5\") pod \"706952c0-5b18-4aed-9ce1-d503bba5ba52\" (UID: \"706952c0-5b18-4aed-9ce1-d503bba5ba52\") " Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.508409 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/706952c0-5b18-4aed-9ce1-d503bba5ba52-kube-api-access-nxxz5" (OuterVolumeSpecName: "kube-api-access-nxxz5") pod "706952c0-5b18-4aed-9ce1-d503bba5ba52" (UID: "706952c0-5b18-4aed-9ce1-d503bba5ba52"). InnerVolumeSpecName "kube-api-access-nxxz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.571673 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "706952c0-5b18-4aed-9ce1-d503bba5ba52" (UID: "706952c0-5b18-4aed-9ce1-d503bba5ba52"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.578927 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "706952c0-5b18-4aed-9ce1-d503bba5ba52" (UID: "706952c0-5b18-4aed-9ce1-d503bba5ba52"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.584080 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "706952c0-5b18-4aed-9ce1-d503bba5ba52" (UID: "706952c0-5b18-4aed-9ce1-d503bba5ba52"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.595517 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.595550 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.595582 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.595596 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nxxz5\" (UniqueName: \"kubernetes.io/projected/706952c0-5b18-4aed-9ce1-d503bba5ba52-kube-api-access-nxxz5\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.622914 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-config" (OuterVolumeSpecName: "config") pod "706952c0-5b18-4aed-9ce1-d503bba5ba52" (UID: "706952c0-5b18-4aed-9ce1-d503bba5ba52"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.696940 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706952c0-5b18-4aed-9ce1-d503bba5ba52-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.845813 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-m87nb"] Mar 20 15:46:41 crc kubenswrapper[3552]: I0320 15:46:41.858624 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-m87nb"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.093160 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-q5q9g" event={"ID":"7961927b-2515-44c1-b350-16985a6c6c73","Type":"ContainerStarted","Data":"f0e1c9091369474f3875b494eb377b951fbad3e15bb10f56bb47c42f70eaa4b9"} Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.099999 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" event={"ID":"706952c0-5b18-4aed-9ce1-d503bba5ba52","Type":"ContainerDied","Data":"fda3e1db6c6d9e91e3da95335c77cb2368261e71b2d7d37bf9d64092f0b97dae"} Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.100055 3552 scope.go:117] "RemoveContainer" containerID="5b17712d7ec03f53a3d465ead2e18a221c90d07dfb43ba8a9efc89a8126433d3" Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.100195 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74cfff8f4c-rcmqm" Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.121466 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-ll6vw"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.122465 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-q5q9g" podStartSLOduration=2.045259018 podStartE2EDuration="15.122422795s" podCreationTimestamp="2026-03-20 15:46:27 +0000 UTC" firstStartedPulling="2026-03-20 15:46:28.232004469 +0000 UTC m=+1287.925701289" lastFinishedPulling="2026-03-20 15:46:41.309168236 +0000 UTC m=+1301.002865066" observedRunningTime="2026-03-20 15:46:42.1121626 +0000 UTC m=+1301.805859430" watchObservedRunningTime="2026-03-20 15:46:42.122422795 +0000 UTC m=+1301.816119625" Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.135128 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-c3b1-account-create-update-2ngjl"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.137396 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerStarted","Data":"da3b9b6c4eda1da2baa0ef4ba5735cef89fda87e7a23f34e15ac57d956e68309"} Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.190489 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-2002-account-create-update-mnjhb"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.202665 3552 scope.go:117] "RemoveContainer" containerID="82b3e5724ad7e853f871e22ac1c5814773597bcfd0102fdca864ce9614a293d8" Mar 20 15:46:42 crc kubenswrapper[3552]: W0320 15:46:42.224843 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55543b44_cb0a_4ffe_997e_d94d126eb91b.slice/crio-f0e4894221a8127259cf24f9d236d18c47dc3a3ca70ba27626d409cd4d29d0f8 WatchSource:0}: Error finding container f0e4894221a8127259cf24f9d236d18c47dc3a3ca70ba27626d409cd4d29d0f8: Status 404 returned error can't find the container with id f0e4894221a8127259cf24f9d236d18c47dc3a3ca70ba27626d409cd4d29d0f8 Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.245453 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74cfff8f4c-rcmqm"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.253481 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74cfff8f4c-rcmqm"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.334914 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-4ntxp"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.537923 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zxrpw"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.552583 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-449e-account-create-update-r8fxj"] Mar 20 15:46:42 crc kubenswrapper[3552]: I0320 15:46:42.559939 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-84nn5"] Mar 20 15:46:42 crc kubenswrapper[3552]: W0320 15:46:42.592606 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa27c1f0_656c_4972_aeb9_8279bd8cc6da.slice/crio-e12fba5976ca6369ba37f9472e418263a25e04cae6a1c742dba851d31c7226a7 WatchSource:0}: Error 
finding container e12fba5976ca6369ba37f9472e418263a25e04cae6a1c742dba851d31c7226a7: Status 404 returned error can't find the container with id e12fba5976ca6369ba37f9472e418263a25e04cae6a1c742dba851d31c7226a7 Mar 20 15:46:42 crc kubenswrapper[3552]: W0320 15:46:42.597678 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc56721e1_6189_4d50_bb51_cdf6a0f163a8.slice/crio-ca3fbe0c148cf340a722c121bc66621cc6d2e989c8ed81d2aea4c792e59e7455 WatchSource:0}: Error finding container ca3fbe0c148cf340a722c121bc66621cc6d2e989c8ed81d2aea4c792e59e7455: Status 404 returned error can't find the container with id ca3fbe0c148cf340a722c121bc66621cc6d2e989c8ed81d2aea4c792e59e7455 Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.143437 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zxrpw" event={"ID":"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa","Type":"ContainerStarted","Data":"367b7a6fb761a5ffbd64b3e9ac3b844b1614ca34094a51b4f7b87ef7d33edec4"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.143475 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zxrpw" event={"ID":"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa","Type":"ContainerStarted","Data":"aa1c9e24c7a04d9b9c712619513696b6eb37b390a41ec8cbb9a670eeafd7b79d"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.145454 3552 generic.go:334] "Generic (PLEG): container finished" podID="3e0924e3-d91e-44d3-a060-0e400a85cfc9" containerID="3126c7c8e97ac4e36fef9be8bc5a4791ac35fcecdb186a2b5a9d88c92059776e" exitCode=0 Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.145544 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-ll6vw" event={"ID":"3e0924e3-d91e-44d3-a060-0e400a85cfc9","Type":"ContainerDied","Data":"3126c7c8e97ac4e36fef9be8bc5a4791ac35fcecdb186a2b5a9d88c92059776e"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.145602 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-ll6vw" event={"ID":"3e0924e3-d91e-44d3-a060-0e400a85cfc9","Type":"ContainerStarted","Data":"ce9e1dd5ea7658347c5b45f006599d8d2e3055e996733e01e21c6c88ad71e835"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.147147 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-84nn5" event={"ID":"c56721e1-6189-4d50-bb51-cdf6a0f163a8","Type":"ContainerStarted","Data":"dd9ba912db86217994c5b6b924300df66f8da27b4c706444731209518cf43011"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.147177 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-84nn5" event={"ID":"c56721e1-6189-4d50-bb51-cdf6a0f163a8","Type":"ContainerStarted","Data":"ca3fbe0c148cf340a722c121bc66621cc6d2e989c8ed81d2aea4c792e59e7455"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.149391 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c3b1-account-create-update-2ngjl" event={"ID":"6d13a1a3-677a-43ea-bb63-305d67463ec7","Type":"ContainerStarted","Data":"e691b528c59ae7f167e883811d6b8c8a0d8c23280fa9cff74dd75230c5a589d1"} Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.149756 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c3b1-account-create-update-2ngjl" event={"ID":"6d13a1a3-677a-43ea-bb63-305d67463ec7","Type":"ContainerStarted","Data":"5157faa44e9f8874ec8e3ce4510f398895daee103f68ce33a6ef60ae003d716b"} Mar 20 15:46:43 crc 
kubenswrapper[3552]: I0320 15:46:43.150839 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4ntxp" event={"ID":"e04aaab3-651f-4523-ab35-250a33f54f4d","Type":"ContainerStarted","Data":"2d878f79baeb09c9f756b98fae12d8686584e698e342aa5c2260927243126da5"}
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.152359 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-449e-account-create-update-r8fxj" event={"ID":"aa27c1f0-656c-4972-aeb9-8279bd8cc6da","Type":"ContainerStarted","Data":"0791022f3fc145812764d5cb666c1b28d78717f628f50c5bcf062b59a20e6eae"}
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.152384 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-449e-account-create-update-r8fxj" event={"ID":"aa27c1f0-656c-4972-aeb9-8279bd8cc6da","Type":"ContainerStarted","Data":"e12fba5976ca6369ba37f9472e418263a25e04cae6a1c742dba851d31c7226a7"}
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.156997 3552 generic.go:334] "Generic (PLEG): container finished" podID="55543b44-cb0a-4ffe-997e-d94d126eb91b" containerID="95e6f9f09b1adf70685ba8b0dc1c4bd430acf4fbbca2999e959ea6783954846e" exitCode=0
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.157903 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2002-account-create-update-mnjhb" event={"ID":"55543b44-cb0a-4ffe-997e-d94d126eb91b","Type":"ContainerDied","Data":"95e6f9f09b1adf70685ba8b0dc1c4bd430acf4fbbca2999e959ea6783954846e"}
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.157930 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2002-account-create-update-mnjhb" event={"ID":"55543b44-cb0a-4ffe-997e-d94d126eb91b","Type":"ContainerStarted","Data":"f0e4894221a8127259cf24f9d236d18c47dc3a3ca70ba27626d409cd4d29d0f8"}
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.179981 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/placement-db-create-zxrpw" podStartSLOduration=9.179944452 podStartE2EDuration="9.179944452s" podCreationTimestamp="2026-03-20 15:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:43.166479871 +0000 UTC m=+1302.860176711" watchObservedRunningTime="2026-03-20 15:46:43.179944452 +0000 UTC m=+1302.873641282"
Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.182852 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-c3b1-account-create-update-2ngjl" podStartSLOduration=8.182829109 podStartE2EDuration="8.182829109s" podCreationTimestamp="2026-03-20 15:46:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:43.17913863 +0000 UTC m=+1302.872835460" watchObservedRunningTime="2026-03-20 15:46:43.182829109 +0000 UTC m=+1302.876525929"
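The "Observed pod startup duration" entries carry two figures. On the values logged in this section they are consistent with podStartE2EDuration being watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration being that same span minus the image-pull window (lastFinishedPulling minus firstStartedPulling); pods that pulled nothing (zero pull timestamps) report the two figures equal, as in the three entries above. A small Go check against the swift-ring-rebalance-q5q9g entry logged earlier; illustrative arithmetic only, not the tracker's implementation:

// Values copied from the swift-ring-rebalance-q5q9g startup-latency entry.
package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-03-20 15:46:27 +0000 UTC")
	firstPull := parse("2026-03-20 15:46:28.232004469 +0000 UTC")
	lastPull := parse("2026-03-20 15:46:41.309168236 +0000 UTC")
	observed := parse("2026-03-20 15:46:42.122422795 +0000 UTC")

	e2e := observed.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println(e2e) // 15.122422795s, matching podStartE2EDuration
	fmt.Println(slo) // ~2.045259s, matching podStartSLOduration up to rounding in the logged values
}

Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.198370 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/placement-449e-account-create-update-r8fxj" podStartSLOduration=9.198325214 podStartE2EDuration="9.198325214s" podCreationTimestamp="2026-03-20 15:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:43.193640708 +0000 UTC m=+1302.887337548" watchObservedRunningTime="2026-03-20 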
15:46:43.198325214 +0000 UTC m=+1302.892022044" Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.206419 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/keystone-db-create-84nn5" podStartSLOduration=9.206356799 podStartE2EDuration="9.206356799s" podCreationTimestamp="2026-03-20 15:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:46:43.205561317 +0000 UTC m=+1302.899258167" watchObservedRunningTime="2026-03-20 15:46:43.206356799 +0000 UTC m=+1302.900053639" Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.439024 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" path="/var/lib/kubelet/pods/706952c0-5b18-4aed-9ce1-d503bba5ba52/volumes" Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.440104 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e628a45b-693e-4699-b6f8-88a01dfd6ae7" path="/var/lib/kubelet/pods/e628a45b-693e-4699-b6f8-88a01dfd6ae7/volumes" Mar 20 15:46:43 crc kubenswrapper[3552]: I0320 15:46:43.646192 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0" Mar 20 15:46:43 crc kubenswrapper[3552]: E0320 15:46:43.646321 3552 projected.go:294] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Mar 20 15:46:43 crc kubenswrapper[3552]: E0320 15:46:43.646338 3552 projected.go:200] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Mar 20 15:46:43 crc kubenswrapper[3552]: E0320 15:46:43.646389 3552 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift podName:dd24d70f-864e-4803-8e8c-9d9e5aadfa84 nodeName:}" failed. No retries permitted until 2026-03-20 15:46:59.646373367 +0000 UTC m=+1319.340070197 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift") pod "swift-storage-0" (UID: "dd24d70f-864e-4803-8e8c-9d9e5aadfa84") : configmap "swift-ring-files" not found
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.165228 3552 generic.go:334] "Generic (PLEG): container finished" podID="c56721e1-6189-4d50-bb51-cdf6a0f163a8" containerID="dd9ba912db86217994c5b6b924300df66f8da27b4c706444731209518cf43011" exitCode=0
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.165324 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-84nn5" event={"ID":"c56721e1-6189-4d50-bb51-cdf6a0f163a8","Type":"ContainerDied","Data":"dd9ba912db86217994c5b6b924300df66f8da27b4c706444731209518cf43011"}
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.167025 3552 generic.go:334] "Generic (PLEG): container finished" podID="6d13a1a3-677a-43ea-bb63-305d67463ec7" containerID="e691b528c59ae7f167e883811d6b8c8a0d8c23280fa9cff74dd75230c5a589d1" exitCode=0
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.167101 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c3b1-account-create-update-2ngjl" event={"ID":"6d13a1a3-677a-43ea-bb63-305d67463ec7","Type":"ContainerDied","Data":"e691b528c59ae7f167e883811d6b8c8a0d8c23280fa9cff74dd75230c5a589d1"}
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.169339 3552 generic.go:334] "Generic (PLEG): container finished" podID="aa27c1f0-656c-4972-aeb9-8279bd8cc6da" containerID="0791022f3fc145812764d5cb666c1b28d78717f628f50c5bcf062b59a20e6eae" exitCode=0
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.169398 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-449e-account-create-update-r8fxj" event={"ID":"aa27c1f0-656c-4972-aeb9-8279bd8cc6da","Type":"ContainerDied","Data":"0791022f3fc145812764d5cb666c1b28d78717f628f50c5bcf062b59a20e6eae"}
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.172079 3552 generic.go:334] "Generic (PLEG): container finished" podID="8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" containerID="367b7a6fb761a5ffbd64b3e9ac3b844b1614ca34094a51b4f7b87ef7d33edec4" exitCode=0
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.172101 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zxrpw" event={"ID":"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa","Type":"ContainerDied","Data":"367b7a6fb761a5ffbd64b3e9ac3b844b1614ca34094a51b4f7b87ef7d33edec4"}
Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.657148 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-ll6vw"
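The etc-swift failure above is a startup-ordering issue rather than a permanent error: the projected volume for swift-storage-0 sources the swift-ring-files ConfigMap, which only exists once the swift-ring-rebalance job publishes the rings, so kubelet parks the mount and retries after an exponentially growing delay (16s at this attempt, per "durationBeforeRetry 16s"). A sketch of that doubling-backoff pattern; the initial delay and cap here are assumptions for illustration, not kubelet's exact tuning:

// Doubling backoff: each failed MountVolume attempt roughly doubles the wait
// until a cap, so a late-appearing dependency is retried at 500ms, 1s, 2s, ...
package main

import (
	"fmt"
	"time"
)

type backoff struct {
	delay, max time.Duration
}

func (b *backoff) next() time.Duration {
	d := b.delay
	b.delay *= 2
	if b.delay > b.max {
		b.delay = b.max
	}
	return d
}

func main() {
	b := backoff{delay: 500 * time.Millisecond, max: 2 * time.Minute}
	for i := 0; i < 7; i++ {
		fmt.Println(b.next()) // 500ms 1s 2s 4s 8s 16s 32s -- 16s matches the retry seen in the log
	}
}

Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.663676 3552 util.go:48] "No ready sandbox for pod can be found. 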
Need to start a new one" pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.663808 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e0924e3-d91e-44d3-a060-0e400a85cfc9-operator-scripts\") pod \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.663888 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9d7gm\" (UniqueName: \"kubernetes.io/projected/3e0924e3-d91e-44d3-a060-0e400a85cfc9-kube-api-access-9d7gm\") pod \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\" (UID: \"3e0924e3-d91e-44d3-a060-0e400a85cfc9\") " Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.664872 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e0924e3-d91e-44d3-a060-0e400a85cfc9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e0924e3-d91e-44d3-a060-0e400a85cfc9" (UID: "3e0924e3-d91e-44d3-a060-0e400a85cfc9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.669294 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e0924e3-d91e-44d3-a060-0e400a85cfc9-kube-api-access-9d7gm" (OuterVolumeSpecName: "kube-api-access-9d7gm") pod "3e0924e3-d91e-44d3-a060-0e400a85cfc9" (UID: "3e0924e3-d91e-44d3-a060-0e400a85cfc9"). InnerVolumeSpecName "kube-api-access-9d7gm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.765315 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55543b44-cb0a-4ffe-997e-d94d126eb91b-operator-scripts\") pod \"55543b44-cb0a-4ffe-997e-d94d126eb91b\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.765379 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5vh7\" (UniqueName: \"kubernetes.io/projected/55543b44-cb0a-4ffe-997e-d94d126eb91b-kube-api-access-l5vh7\") pod \"55543b44-cb0a-4ffe-997e-d94d126eb91b\" (UID: \"55543b44-cb0a-4ffe-997e-d94d126eb91b\") " Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.765726 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e0924e3-d91e-44d3-a060-0e400a85cfc9-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.765739 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-9d7gm\" (UniqueName: \"kubernetes.io/projected/3e0924e3-d91e-44d3-a060-0e400a85cfc9-kube-api-access-9d7gm\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.766747 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55543b44-cb0a-4ffe-997e-d94d126eb91b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55543b44-cb0a-4ffe-997e-d94d126eb91b" (UID: "55543b44-cb0a-4ffe-997e-d94d126eb91b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.768671 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55543b44-cb0a-4ffe-997e-d94d126eb91b-kube-api-access-l5vh7" (OuterVolumeSpecName: "kube-api-access-l5vh7") pod "55543b44-cb0a-4ffe-997e-d94d126eb91b" (UID: "55543b44-cb0a-4ffe-997e-d94d126eb91b"). InnerVolumeSpecName "kube-api-access-l5vh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.867346 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55543b44-cb0a-4ffe-997e-d94d126eb91b-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:44 crc kubenswrapper[3552]: I0320 15:46:44.867385 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-l5vh7\" (UniqueName: \"kubernetes.io/projected/55543b44-cb0a-4ffe-997e-d94d126eb91b-kube-api-access-l5vh7\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.179877 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-ll6vw" event={"ID":"3e0924e3-d91e-44d3-a060-0e400a85cfc9","Type":"ContainerDied","Data":"ce9e1dd5ea7658347c5b45f006599d8d2e3055e996733e01e21c6c88ad71e835"} Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.179913 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce9e1dd5ea7658347c5b45f006599d8d2e3055e996733e01e21c6c88ad71e835" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.179939 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-ll6vw" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.187871 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2002-account-create-update-mnjhb" event={"ID":"55543b44-cb0a-4ffe-997e-d94d126eb91b","Type":"ContainerDied","Data":"f0e4894221a8127259cf24f9d236d18c47dc3a3ca70ba27626d409cd4d29d0f8"} Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.187899 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0e4894221a8127259cf24f9d236d18c47dc3a3ca70ba27626d409cd4d29d0f8" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.188238 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-2002-account-create-update-mnjhb" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.568352 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.687753 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hchxt\" (UniqueName: \"kubernetes.io/projected/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-kube-api-access-hchxt\") pod \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.688484 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-operator-scripts\") pod \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\" (UID: \"aa27c1f0-656c-4972-aeb9-8279bd8cc6da\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.689056 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aa27c1f0-656c-4972-aeb9-8279bd8cc6da" (UID: "aa27c1f0-656c-4972-aeb9-8279bd8cc6da"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.694529 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-kube-api-access-hchxt" (OuterVolumeSpecName: "kube-api-access-hchxt") pod "aa27c1f0-656c-4972-aeb9-8279bd8cc6da" (UID: "aa27c1f0-656c-4972-aeb9-8279bd8cc6da"). InnerVolumeSpecName "kube-api-access-hchxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.796197 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.796231 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hchxt\" (UniqueName: \"kubernetes.io/projected/aa27c1f0-656c-4972-aeb9-8279bd8cc6da-kube-api-access-hchxt\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.848749 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.862866 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.881103 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999197 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56721e1-6189-4d50-bb51-cdf6a0f163a8-operator-scripts\") pod \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999276 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-operator-scripts\") pod \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999343 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrmsg\" (UniqueName: \"kubernetes.io/projected/c56721e1-6189-4d50-bb51-cdf6a0f163a8-kube-api-access-jrmsg\") pod \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\" (UID: \"c56721e1-6189-4d50-bb51-cdf6a0f163a8\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999368 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bswxj\" (UniqueName: \"kubernetes.io/projected/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-kube-api-access-bswxj\") pod \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\" (UID: \"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999511 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hngww\" (UniqueName: \"kubernetes.io/projected/6d13a1a3-677a-43ea-bb63-305d67463ec7-kube-api-access-hngww\") pod \"6d13a1a3-677a-43ea-bb63-305d67463ec7\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999567 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d13a1a3-677a-43ea-bb63-305d67463ec7-operator-scripts\") pod \"6d13a1a3-677a-43ea-bb63-305d67463ec7\" (UID: \"6d13a1a3-677a-43ea-bb63-305d67463ec7\") " Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999688 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c56721e1-6189-4d50-bb51-cdf6a0f163a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c56721e1-6189-4d50-bb51-cdf6a0f163a8" (UID: "c56721e1-6189-4d50-bb51-cdf6a0f163a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:45 crc kubenswrapper[3552]: I0320 15:46:45.999800 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" (UID: "8da21953-3ce2-4d5a-abab-6bf0fc7f16aa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.000186 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d13a1a3-677a-43ea-bb63-305d67463ec7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d13a1a3-677a-43ea-bb63-305d67463ec7" (UID: "6d13a1a3-677a-43ea-bb63-305d67463ec7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.000276 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.000293 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d13a1a3-677a-43ea-bb63-305d67463ec7-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.000308 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56721e1-6189-4d50-bb51-cdf6a0f163a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.007703 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d13a1a3-677a-43ea-bb63-305d67463ec7-kube-api-access-hngww" (OuterVolumeSpecName: "kube-api-access-hngww") pod "6d13a1a3-677a-43ea-bb63-305d67463ec7" (UID: "6d13a1a3-677a-43ea-bb63-305d67463ec7"). InnerVolumeSpecName "kube-api-access-hngww". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.007800 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-kube-api-access-bswxj" (OuterVolumeSpecName: "kube-api-access-bswxj") pod "8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" (UID: "8da21953-3ce2-4d5a-abab-6bf0fc7f16aa"). InnerVolumeSpecName "kube-api-access-bswxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.007850 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c56721e1-6189-4d50-bb51-cdf6a0f163a8-kube-api-access-jrmsg" (OuterVolumeSpecName: "kube-api-access-jrmsg") pod "c56721e1-6189-4d50-bb51-cdf6a0f163a8" (UID: "c56721e1-6189-4d50-bb51-cdf6a0f163a8"). InnerVolumeSpecName "kube-api-access-jrmsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.102143 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-jrmsg\" (UniqueName: \"kubernetes.io/projected/c56721e1-6189-4d50-bb51-cdf6a0f163a8-kube-api-access-jrmsg\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.102184 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-bswxj\" (UniqueName: \"kubernetes.io/projected/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa-kube-api-access-bswxj\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.102196 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hngww\" (UniqueName: \"kubernetes.io/projected/6d13a1a3-677a-43ea-bb63-305d67463ec7-kube-api-access-hngww\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.197510 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-84nn5" event={"ID":"c56721e1-6189-4d50-bb51-cdf6a0f163a8","Type":"ContainerDied","Data":"ca3fbe0c148cf340a722c121bc66621cc6d2e989c8ed81d2aea4c792e59e7455"} Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.197531 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-84nn5" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.197547 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca3fbe0c148cf340a722c121bc66621cc6d2e989c8ed81d2aea4c792e59e7455" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.199428 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-c3b1-account-create-update-2ngjl" event={"ID":"6d13a1a3-677a-43ea-bb63-305d67463ec7","Type":"ContainerDied","Data":"5157faa44e9f8874ec8e3ce4510f398895daee103f68ce33a6ef60ae003d716b"} Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.199455 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5157faa44e9f8874ec8e3ce4510f398895daee103f68ce33a6ef60ae003d716b" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.199635 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-c3b1-account-create-update-2ngjl" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.201665 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-449e-account-create-update-r8fxj" event={"ID":"aa27c1f0-656c-4972-aeb9-8279bd8cc6da","Type":"ContainerDied","Data":"e12fba5976ca6369ba37f9472e418263a25e04cae6a1c742dba851d31c7226a7"} Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.201690 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e12fba5976ca6369ba37f9472e418263a25e04cae6a1c742dba851d31c7226a7" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.201743 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-449e-account-create-update-r8fxj" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.206187 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zxrpw" event={"ID":"8da21953-3ce2-4d5a-abab-6bf0fc7f16aa","Type":"ContainerDied","Data":"aa1c9e24c7a04d9b9c712619513696b6eb37b390a41ec8cbb9a670eeafd7b79d"} Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.206221 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa1c9e24c7a04d9b9c712619513696b6eb37b390a41ec8cbb9a670eeafd7b79d" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.206264 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zxrpw" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855373 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-q4mwz"] Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855533 3552 topology_manager.go:215] "Topology Admit Handler" podUID="22af4aca-4214-48f2-9741-44a70d1c7245" podNamespace="openstack" podName="root-account-create-update-q4mwz" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855724 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3e0924e3-d91e-44d3-a060-0e400a85cfc9" containerName="mariadb-database-create" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855734 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e0924e3-d91e-44d3-a060-0e400a85cfc9" containerName="mariadb-database-create" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855749 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="init" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855756 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="init" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855769 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="55543b44-cb0a-4ffe-997e-d94d126eb91b" containerName="mariadb-account-create-update" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855775 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="55543b44-cb0a-4ffe-997e-d94d126eb91b" containerName="mariadb-account-create-update" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855791 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" containerName="mariadb-database-create" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855798 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" containerName="mariadb-database-create" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855809 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e628a45b-693e-4699-b6f8-88a01dfd6ae7" containerName="mariadb-account-create-update" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855816 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e628a45b-693e-4699-b6f8-88a01dfd6ae7" containerName="mariadb-account-create-update" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855826 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="dnsmasq-dns" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855832 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="dnsmasq-dns" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855841 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c56721e1-6189-4d50-bb51-cdf6a0f163a8" containerName="mariadb-database-create" Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855847 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c56721e1-6189-4d50-bb51-cdf6a0f163a8" containerName="mariadb-database-create" Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855856 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="aa27c1f0-656c-4972-aeb9-8279bd8cc6da" containerName="mariadb-account-create-update" Mar 20 15:46:46 
crc kubenswrapper[3552]: I0320 15:46:46.855862 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa27c1f0-656c-4972-aeb9-8279bd8cc6da" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: E0320 15:46:46.855875 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6d13a1a3-677a-43ea-bb63-305d67463ec7" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.855881 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d13a1a3-677a-43ea-bb63-305d67463ec7" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856021 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa27c1f0-656c-4972-aeb9-8279bd8cc6da" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856031 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d13a1a3-677a-43ea-bb63-305d67463ec7" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856038 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c56721e1-6189-4d50-bb51-cdf6a0f163a8" containerName="mariadb-database-create"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856049 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" containerName="mariadb-database-create"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856058 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="55543b44-cb0a-4ffe-997e-d94d126eb91b" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856069 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e0924e3-d91e-44d3-a060-0e400a85cfc9" containerName="mariadb-database-create"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856077 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e628a45b-693e-4699-b6f8-88a01dfd6ae7" containerName="mariadb-account-create-update"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856088 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="706952c0-5b18-4aed-9ce1-d503bba5ba52" containerName="dnsmasq-dns"
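The paired cpu_manager/memory_manager entries above fire when a new pod (root-account-create-update-q4mwz) is admitted: both resource managers sweep their per-container assignment state and drop entries belonging to pods that no longer exist, which is why every just-completed job pod in this section shows up once more here. An illustrative sketch of that sweep; the types and names are hypothetical, not kubelet's internals:

// Purge per-(podUID, container) state for pods no longer active.
package main

import "fmt"

type key struct{ podUID, container string }

func removeStaleState(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %q\n", k.container, k.podUID)
			delete(assignments, k)
		}
	}
}

func main() {
	state := map[key]string{
		{"3e0924e3-d91e-44d3-a060-0e400a85cfc9", "mariadb-database-create"}:       "cpuset:0-1",
		{"22af4aca-4214-48f2-9741-44a70d1c7245", "mariadb-account-create-update"}: "cpuset:2",
	}
	removeStaleState(state, map[string]bool{"22af4aca-4214-48f2-9741-44a70d1c7245": true})
	fmt.Println(len(state)) // 1: only the active pod's entry remains
}

Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.856574 3552 util.go:30] "No sandbox for pod can be found. 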
Need to start a new one" pod="openstack/root-account-create-update-q4mwz"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.858559 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Mar 20 15:46:46 crc kubenswrapper[3552]: I0320 15:46:46.876739 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-q4mwz"]
Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.016315 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22af4aca-4214-48f2-9741-44a70d1c7245-operator-scripts\") pod \"root-account-create-update-q4mwz\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " pod="openstack/root-account-create-update-q4mwz"
Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.016487 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scrcw\" (UniqueName: \"kubernetes.io/projected/22af4aca-4214-48f2-9741-44a70d1c7245-kube-api-access-scrcw\") pod \"root-account-create-update-q4mwz\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " pod="openstack/root-account-create-update-q4mwz"
Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.117623 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22af4aca-4214-48f2-9741-44a70d1c7245-operator-scripts\") pod \"root-account-create-update-q4mwz\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " pod="openstack/root-account-create-update-q4mwz"
Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.117705 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-scrcw\" (UniqueName: \"kubernetes.io/projected/22af4aca-4214-48f2-9741-44a70d1c7245-kube-api-access-scrcw\") pod \"root-account-create-update-q4mwz\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " pod="openstack/root-account-create-update-q4mwz"
Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.118441 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22af4aca-4214-48f2-9741-44a70d1c7245-operator-scripts\") pod \"root-account-create-update-q4mwz\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " pod="openstack/root-account-create-update-q4mwz"
Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.137005 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-scrcw\" (UniqueName: \"kubernetes.io/projected/22af4aca-4214-48f2-9741-44a70d1c7245-kube-api-access-scrcw\") pod \"root-account-create-update-q4mwz\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " pod="openstack/root-account-create-update-q4mwz"
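The mount-side sequence above mirrors the unmount sequence seen earlier in this log: for each desired volume the reconciler first records VerifyControllerAttachedVolume, then hands MountVolume to the operation executor, whose plugin SetUp reports success (operation_generator.go:721); the reverse path is UnmountVolume.TearDown followed by "Volume detached". A simplified desired-versus-actual reconciliation loop for reading these entries; hypothetical types, not the real reconciler:

// Compare what should be mounted for pods on this node against what is
// actually mounted, and emit the corresponding operations.
package main

import "fmt"

func reconcile(desired, actual map[string]bool) {
	for v := range desired {
		if !actual[v] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", v)
		}
	}
	for v := range actual {
		if !desired[v] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", v)
		}
	}
}

func main() {
	desired := map[string]bool{"operator-scripts": true, "kube-api-access-scrcw": true}
	actual := map[string]bool{"kube-api-access-fnnmh": true} // left over from a deleted pod
	reconcile(desired, actual)
}

Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.172395 3552 util.go:30] "No sandbox for pod can be found. 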
Need to start a new one" pod="openstack/root-account-create-update-q4mwz" Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.235804 3552 generic.go:334] "Generic (PLEG): container finished" podID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerID="51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6" exitCode=0 Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.235842 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"90a6e0ae-40a5-47b1-8495-26b369c628c4","Type":"ContainerDied","Data":"51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6"} Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.656079 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-q4mwz"] Mar 20 15:46:47 crc kubenswrapper[3552]: W0320 15:46:47.669911 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22af4aca_4214_48f2_9741_44a70d1c7245.slice/crio-e6247964a4705df705ca038c5a46672bc806004c0c21d4d8e0a63bb88ac34fde WatchSource:0}: Error finding container e6247964a4705df705ca038c5a46672bc806004c0c21d4d8e0a63bb88ac34fde: Status 404 returned error can't find the container with id e6247964a4705df705ca038c5a46672bc806004c0c21d4d8e0a63bb88ac34fde Mar 20 15:46:47 crc kubenswrapper[3552]: I0320 15:46:47.747078 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Mar 20 15:46:48 crc kubenswrapper[3552]: I0320 15:46:48.246828 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerStarted","Data":"c5e2d8664254019df8337109c1dd12c7ed02482d86963cca3b2caebd193ffe36"} Mar 20 15:46:48 crc kubenswrapper[3552]: I0320 15:46:48.248986 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q4mwz" event={"ID":"22af4aca-4214-48f2-9741-44a70d1c7245","Type":"ContainerStarted","Data":"bed04f0fd40a44758c7f282f8f3c568253168401c10ced2a33beedb681dc067e"} Mar 20 15:46:48 crc kubenswrapper[3552]: I0320 15:46:48.249027 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q4mwz" event={"ID":"22af4aca-4214-48f2-9741-44a70d1c7245","Type":"ContainerStarted","Data":"e6247964a4705df705ca038c5a46672bc806004c0c21d4d8e0a63bb88ac34fde"} Mar 20 15:46:48 crc kubenswrapper[3552]: I0320 15:46:48.250908 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"90a6e0ae-40a5-47b1-8495-26b369c628c4","Type":"ContainerStarted","Data":"a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e"} Mar 20 15:46:48 crc kubenswrapper[3552]: I0320 15:46:48.252647 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:46:48 crc kubenswrapper[3552]: I0320 15:46:48.274272 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=40.685819992 podStartE2EDuration="1m9.274229271s" podCreationTimestamp="2026-03-20 15:45:39 +0000 UTC" firstStartedPulling="2026-03-20 15:45:41.307632481 +0000 UTC m=+1241.001329311" lastFinishedPulling="2026-03-20 15:46:09.89604176 +0000 UTC m=+1269.589738590" observedRunningTime="2026-03-20 15:46:48.272059053 +0000 UTC m=+1307.965755893" watchObservedRunningTime="2026-03-20 15:46:48.274229271 +0000 UTC 
m=+1307.967926101" Mar 20 15:46:49 crc kubenswrapper[3552]: I0320 15:46:49.261356 3552 generic.go:334] "Generic (PLEG): container finished" podID="22af4aca-4214-48f2-9741-44a70d1c7245" containerID="bed04f0fd40a44758c7f282f8f3c568253168401c10ced2a33beedb681dc067e" exitCode=0 Mar 20 15:46:49 crc kubenswrapper[3552]: I0320 15:46:49.261474 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q4mwz" event={"ID":"22af4aca-4214-48f2-9741-44a70d1c7245","Type":"ContainerDied","Data":"bed04f0fd40a44758c7f282f8f3c568253168401c10ced2a33beedb681dc067e"} Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.270605 3552 generic.go:334] "Generic (PLEG): container finished" podID="7961927b-2515-44c1-b350-16985a6c6c73" containerID="f0e1c9091369474f3875b494eb377b951fbad3e15bb10f56bb47c42f70eaa4b9" exitCode=0 Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.270661 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-q5q9g" event={"ID":"7961927b-2515-44c1-b350-16985a6c6c73","Type":"ContainerDied","Data":"f0e1c9091369474f3875b494eb377b951fbad3e15bb10f56bb47c42f70eaa4b9"} Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.272367 3552 generic.go:334] "Generic (PLEG): container finished" podID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerID="d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021" exitCode=0 Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.272510 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f1e0de34-e3ac-4691-94c4-d5ac03353099","Type":"ContainerDied","Data":"d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021"} Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.731411 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-q4mwz" Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.783270 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scrcw\" (UniqueName: \"kubernetes.io/projected/22af4aca-4214-48f2-9741-44a70d1c7245-kube-api-access-scrcw\") pod \"22af4aca-4214-48f2-9741-44a70d1c7245\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.783506 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22af4aca-4214-48f2-9741-44a70d1c7245-operator-scripts\") pod \"22af4aca-4214-48f2-9741-44a70d1c7245\" (UID: \"22af4aca-4214-48f2-9741-44a70d1c7245\") " Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.784517 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22af4aca-4214-48f2-9741-44a70d1c7245-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "22af4aca-4214-48f2-9741-44a70d1c7245" (UID: "22af4aca-4214-48f2-9741-44a70d1c7245"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.789278 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22af4aca-4214-48f2-9741-44a70d1c7245-kube-api-access-scrcw" (OuterVolumeSpecName: "kube-api-access-scrcw") pod "22af4aca-4214-48f2-9741-44a70d1c7245" (UID: "22af4aca-4214-48f2-9741-44a70d1c7245"). InnerVolumeSpecName "kube-api-access-scrcw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.885787 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22af4aca-4214-48f2-9741-44a70d1c7245-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:50 crc kubenswrapper[3552]: I0320 15:46:50.885815 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-scrcw\" (UniqueName: \"kubernetes.io/projected/22af4aca-4214-48f2-9741-44a70d1c7245-kube-api-access-scrcw\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.280153 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q4mwz" event={"ID":"22af4aca-4214-48f2-9741-44a70d1c7245","Type":"ContainerDied","Data":"e6247964a4705df705ca038c5a46672bc806004c0c21d4d8e0a63bb88ac34fde"} Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.280192 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6247964a4705df705ca038c5a46672bc806004c0c21d4d8e0a63bb88ac34fde" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.280157 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-q4mwz" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.282018 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f1e0de34-e3ac-4691-94c4-d5ac03353099","Type":"ContainerStarted","Data":"1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0"} Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.282472 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.329552 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=49.001338715 podStartE2EDuration="1m12.329505812s" podCreationTimestamp="2026-03-20 15:45:39 +0000 UTC" firstStartedPulling="2026-03-20 15:45:46.774838413 +0000 UTC m=+1246.468535243" lastFinishedPulling="2026-03-20 15:46:10.10300551 +0000 UTC m=+1269.796702340" observedRunningTime="2026-03-20 15:46:51.320889921 +0000 UTC m=+1311.014586771" watchObservedRunningTime="2026-03-20 15:46:51.329505812 +0000 UTC m=+1311.023202642" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.664618 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.801697 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-ring-data-devices\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.801754 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-combined-ca-bundle\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.801802 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k254\" (UniqueName: \"kubernetes.io/projected/7961927b-2515-44c1-b350-16985a6c6c73-kube-api-access-5k254\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.801864 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-scripts\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.801945 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7961927b-2515-44c1-b350-16985a6c6c73-etc-swift\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.802000 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-swiftconf\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.802048 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-dispersionconf\") pod \"7961927b-2515-44c1-b350-16985a6c6c73\" (UID: \"7961927b-2515-44c1-b350-16985a6c6c73\") " Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.802247 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.802394 3552 reconciler_common.go:300] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-ring-data-devices\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.806250 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7961927b-2515-44c1-b350-16985a6c6c73-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.811266 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7961927b-2515-44c1-b350-16985a6c6c73-kube-api-access-5k254" (OuterVolumeSpecName: "kube-api-access-5k254") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "kube-api-access-5k254". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.814309 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.828499 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.831935 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-scripts" (OuterVolumeSpecName: "scripts") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.836632 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "7961927b-2515-44c1-b350-16985a6c6c73" (UID: "7961927b-2515-44c1-b350-16985a6c6c73"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.904191 3552 reconciler_common.go:300] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-dispersionconf\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.904232 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.904251 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-5k254\" (UniqueName: \"kubernetes.io/projected/7961927b-2515-44c1-b350-16985a6c6c73-kube-api-access-5k254\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.904264 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7961927b-2515-44c1-b350-16985a6c6c73-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.904278 3552 reconciler_common.go:300] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7961927b-2515-44c1-b350-16985a6c6c73-etc-swift\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:51 crc kubenswrapper[3552]: I0320 15:46:51.904291 3552 reconciler_common.go:300] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7961927b-2515-44c1-b350-16985a6c6c73-swiftconf\") on node \"crc\" DevicePath \"\"" Mar 20 15:46:52 crc kubenswrapper[3552]: I0320 15:46:52.289502 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q5q9g" Mar 20 15:46:52 crc kubenswrapper[3552]: I0320 15:46:52.289492 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-q5q9g" event={"ID":"7961927b-2515-44c1-b350-16985a6c6c73","Type":"ContainerDied","Data":"2e0397bcfa163cb011d859f144ac024e731b4aae59afbda7abf2828147bb17f1"} Mar 20 15:46:52 crc kubenswrapper[3552]: I0320 15:46:52.289984 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e0397bcfa163cb011d859f144ac024e731b4aae59afbda7abf2828147bb17f1" Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.460645 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-c89mf" podUID="854348bd-6351-4ba6-82c7-664311074caf" containerName="ovn-controller" probeResult="failure" output=< Mar 20 15:46:54 crc kubenswrapper[3552]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Mar 20 15:46:54 crc kubenswrapper[3552]: > Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.567781 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.615135 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-qrf8z" Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.931032 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-c89mf-config-lh844"] Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.931169 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cc16d66d-67d5-428b-95e6-722f16a59ee6" podNamespace="openstack" podName="ovn-controller-c89mf-config-lh844" Mar 
Mar 20 15:46:54 crc kubenswrapper[3552]: E0320 15:46:54.931362 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="22af4aca-4214-48f2-9741-44a70d1c7245" containerName="mariadb-account-create-update"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.931379 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="22af4aca-4214-48f2-9741-44a70d1c7245" containerName="mariadb-account-create-update"
Mar 20 15:46:54 crc kubenswrapper[3552]: E0320 15:46:54.931396 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7961927b-2515-44c1-b350-16985a6c6c73" containerName="swift-ring-rebalance"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.931405 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7961927b-2515-44c1-b350-16985a6c6c73" containerName="swift-ring-rebalance"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.931570 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7961927b-2515-44c1-b350-16985a6c6c73" containerName="swift-ring-rebalance"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.931595 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="22af4aca-4214-48f2-9741-44a70d1c7245" containerName="mariadb-account-create-update"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.932065 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.934044 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Mar 20 15:46:54 crc kubenswrapper[3552]: I0320 15:46:54.945766 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c89mf-config-lh844"]
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.062353 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run-ovn\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.062423 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-log-ovn\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.062452 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-scripts\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.062833 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snvwj\" (UniqueName: \"kubernetes.io/projected/cc16d66d-67d5-428b-95e6-722f16a59ee6-kube-api-access-snvwj\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.062984 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.063136 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-additional-scripts\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.164708 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-snvwj\" (UniqueName: \"kubernetes.io/projected/cc16d66d-67d5-428b-95e6-722f16a59ee6-kube-api-access-snvwj\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.164820 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.164850 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-additional-scripts\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.164895 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run-ovn\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.164920 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-log-ovn\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.164945 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-scripts\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.165569 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run-ovn\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.165578 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.165589 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-log-ovn\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.166064 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-additional-scripts\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.167184 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-scripts\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.190075 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-snvwj\" (UniqueName: \"kubernetes.io/projected/cc16d66d-67d5-428b-95e6-722f16a59ee6-kube-api-access-snvwj\") pod \"ovn-controller-c89mf-config-lh844\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.253267 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:46:55 crc kubenswrapper[3552]: I0320 15:46:55.928962 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c89mf-config-lh844"]
Mar 20 15:46:56 crc kubenswrapper[3552]: I0320 15:46:56.315837 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf-config-lh844" event={"ID":"cc16d66d-67d5-428b-95e6-722f16a59ee6","Type":"ContainerStarted","Data":"41139f3cb87301365726444b22702acd7e393a31ab5008c8d86a58ea67900ca4"}
Mar 20 15:46:56 crc kubenswrapper[3552]: I0320 15:46:56.319085 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerStarted","Data":"c1d4c75bf19af62f45bb4656f9d7e8658ba3117f1ec6d6f034936a05bde6c2c0"}
Mar 20 15:46:56 crc kubenswrapper[3552]: I0320 15:46:56.349156 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.225986731 podStartE2EDuration="1m10.349118121s" podCreationTimestamp="2026-03-20 15:45:46 +0000 UTC" firstStartedPulling="2026-03-20 15:45:49.521788761 +0000 UTC m=+1249.215485591" lastFinishedPulling="2026-03-20 15:46:55.644920151 +0000 UTC m=+1315.338616981" observedRunningTime="2026-03-20 15:46:56.344556799 +0000 UTC m=+1316.038253649" watchObservedRunningTime="2026-03-20 15:46:56.349118121 +0000 UTC m=+1316.042814951"
Mar 20 15:46:57 crc kubenswrapper[3552]: I0320 15:46:57.329542 3552 generic.go:334] "Generic (PLEG): container finished" podID="cc16d66d-67d5-428b-95e6-722f16a59ee6" containerID="faa0c566e51de8183bc011bbfaf90e9b9ce83f79a0a83437d577d076ecfab5e3" exitCode=0
Mar 20 15:46:57 crc kubenswrapper[3552]: I0320 15:46:57.330000 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf-config-lh844" event={"ID":"cc16d66d-67d5-428b-95e6-722f16a59ee6","Type":"ContainerDied","Data":"faa0c566e51de8183bc011bbfaf90e9b9ce83f79a0a83437d577d076ecfab5e3"}
Mar 20 15:46:58 crc kubenswrapper[3552]: I0320 15:46:58.238299 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Mar 20 15:46:59 crc kubenswrapper[3552]: I0320 15:46:59.488620 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-c89mf"
Mar 20 15:46:59 crc kubenswrapper[3552]: I0320 15:46:59.649377 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0"
Mar 20 15:46:59 crc kubenswrapper[3552]: I0320 15:46:59.656786 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dd24d70f-864e-4803-8e8c-9d9e5aadfa84-etc-swift\") pod \"swift-storage-0\" (UID: \"dd24d70f-864e-4803-8e8c-9d9e5aadfa84\") " pod="openstack/swift-storage-0"
Mar 20 15:46:59 crc kubenswrapper[3552]: I0320 15:46:59.700348 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Need to start a new one" pod="openstack/swift-storage-0" Mar 20 15:47:00 crc kubenswrapper[3552]: I0320 15:47:00.690594 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:47:01 crc kubenswrapper[3552]: I0320 15:47:01.053820 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Mar 20 15:47:01 crc kubenswrapper[3552]: I0320 15:47:01.303055 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:47:01 crc kubenswrapper[3552]: I0320 15:47:01.303135 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:47:01 crc kubenswrapper[3552]: I0320 15:47:01.303169 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:47:01 crc kubenswrapper[3552]: I0320 15:47:01.303191 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:47:01 crc kubenswrapper[3552]: I0320 15:47:01.303233 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.652612 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-t949z"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.652977 3552 topology_manager.go:215] "Topology Admit Handler" podUID="97cb48d9-1b5c-45e8-b8e7-a30354047d3d" podNamespace="openstack" podName="cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.653840 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.667907 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-t949z"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.735003 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-77zr7"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.735149 3552 topology_manager.go:215] "Topology Admit Handler" podUID="148d3ae9-f212-412a-b4a3-95c60681e8e3" podNamespace="openstack" podName="watcher-db-sync-77zr7" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.736111 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.738899 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.739982 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-t9t8c" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.743591 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-77zr7"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.766834 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-tpfhd"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.766987 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c4a0b1bc-7138-4e84-b566-86bbe003cd2c" podNamespace="openstack" podName="barbican-db-create-tpfhd" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.767895 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.786792 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-tpfhd"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.796696 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-operator-scripts\") pod \"cinder-db-create-t949z\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.796766 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvmvc\" (UniqueName: \"kubernetes.io/projected/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-kube-api-access-lvmvc\") pod \"cinder-db-create-t949z\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.855660 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-1ec3-account-create-update-q4lwl"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.855814 3552 topology_manager.go:215] "Topology Admit Handler" podUID="682a86ca-28d3-4309-95a2-80458ddd5e31" podNamespace="openstack" podName="cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.858272 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.875519 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.876458 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1ec3-account-create-update-q4lwl"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.894283 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-rnz7c"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.894513 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c8f364e0-b915-4bdc-969a-469119a78a2d" podNamespace="openstack" podName="keystone-db-sync-rnz7c" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lvmvc\" (UniqueName: \"kubernetes.io/projected/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-kube-api-access-lvmvc\") pod \"cinder-db-create-t949z\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902259 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-operator-scripts\") pod \"barbican-db-create-tpfhd\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902316 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2cj2\" (UniqueName: \"kubernetes.io/projected/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-kube-api-access-f2cj2\") pod \"barbican-db-create-tpfhd\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902341 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-db-sync-config-data\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902418 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5xb7\" (UniqueName: \"kubernetes.io/projected/148d3ae9-f212-412a-b4a3-95c60681e8e3-kube-api-access-b5xb7\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902451 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-config-data\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902512 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-operator-scripts\") pod \"cinder-db-create-t949z\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " 
pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902557 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-combined-ca-bundle\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.902613 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.904610 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-operator-scripts\") pod \"cinder-db-create-t949z\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.906162 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-rnz7c"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.920375 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.920640 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.920759 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.920874 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tt8n9" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.941439 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvmvc\" (UniqueName: \"kubernetes.io/projected/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-kube-api-access-lvmvc\") pod \"cinder-db-create-t949z\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " pod="openstack/cinder-db-create-t949z" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.944739 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-h9hxv"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.944886 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c88f4219-6cb4-4fd9-8414-239ef9a7c25e" podNamespace="openstack" podName="neutron-db-create-h9hxv" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.945824 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.961861 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-h9hxv"] Mar 20 15:47:02 crc kubenswrapper[3552]: I0320 15:47:02.972612 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-t949z" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.003805 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-2e8b-account-create-update-nl2l7"] Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.003994 3552 topology_manager.go:215] "Topology Admit Handler" podUID="12bbf46a-6ef5-49fb-98c2-dd0e48affa63" podNamespace="openstack" podName="barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.004938 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4b2h\" (UniqueName: \"kubernetes.io/projected/682a86ca-28d3-4309-95a2-80458ddd5e31-kube-api-access-m4b2h\") pod \"cinder-1ec3-account-create-update-q4lwl\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005065 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-operator-scripts\") pod \"barbican-db-create-tpfhd\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005175 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f2cj2\" (UniqueName: \"kubernetes.io/projected/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-kube-api-access-f2cj2\") pod \"barbican-db-create-tpfhd\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005275 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-db-sync-config-data\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005382 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpz9b\" (UniqueName: \"kubernetes.io/projected/c8f364e0-b915-4bdc-969a-469119a78a2d-kube-api-access-lpz9b\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005495 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-b5xb7\" (UniqueName: \"kubernetes.io/projected/148d3ae9-f212-412a-b4a3-95c60681e8e3-kube-api-access-b5xb7\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005586 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-config-data\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.004970 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005727 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-combined-ca-bundle\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005773 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-operator-scripts\") pod \"barbican-db-create-tpfhd\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005848 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-config-data\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005886 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-combined-ca-bundle\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.005918 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/682a86ca-28d3-4309-95a2-80458ddd5e31-operator-scripts\") pod \"cinder-1ec3-account-create-update-q4lwl\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.013272 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-combined-ca-bundle\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.014065 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-config-data\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.014545 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.015946 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-db-sync-config-data\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.020248 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2e8b-account-create-update-nl2l7"] Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 
15:47:03.027447 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-eb5f-account-create-update-x72h6"] Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.027626 3552 topology_manager.go:215] "Topology Admit Handler" podUID="823cdc27-b1a5-4032-8081-3d0466e6a40c" podNamespace="openstack" podName="neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.028903 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.032227 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.035185 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5xb7\" (UniqueName: \"kubernetes.io/projected/148d3ae9-f212-412a-b4a3-95c60681e8e3-kube-api-access-b5xb7\") pod \"watcher-db-sync-77zr7\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.036134 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-eb5f-account-create-update-x72h6"] Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.049785 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.052043 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2cj2\" (UniqueName: \"kubernetes.io/projected/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-kube-api-access-f2cj2\") pod \"barbican-db-create-tpfhd\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.091743 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110440 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-m4b2h\" (UniqueName: \"kubernetes.io/projected/682a86ca-28d3-4309-95a2-80458ddd5e31-kube-api-access-m4b2h\") pod \"cinder-1ec3-account-create-update-q4lwl\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110656 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbqlq\" (UniqueName: \"kubernetes.io/projected/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-kube-api-access-tbqlq\") pod \"barbican-2e8b-account-create-update-nl2l7\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110729 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lpz9b\" (UniqueName: \"kubernetes.io/projected/c8f364e0-b915-4bdc-969a-469119a78a2d-kube-api-access-lpz9b\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110813 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-operator-scripts\") pod \"neutron-db-create-h9hxv\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110856 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-operator-scripts\") pod \"barbican-2e8b-account-create-update-nl2l7\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110910 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-combined-ca-bundle\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.110945 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/823cdc27-b1a5-4032-8081-3d0466e6a40c-operator-scripts\") pod \"neutron-eb5f-account-create-update-x72h6\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.111019 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-config-data\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.111058 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/682a86ca-28d3-4309-95a2-80458ddd5e31-operator-scripts\") pod \"cinder-1ec3-account-create-update-q4lwl\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.111126 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9qcg\" (UniqueName: \"kubernetes.io/projected/823cdc27-b1a5-4032-8081-3d0466e6a40c-kube-api-access-f9qcg\") pod \"neutron-eb5f-account-create-update-x72h6\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.111153 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xhd6\" (UniqueName: \"kubernetes.io/projected/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-kube-api-access-2xhd6\") pod \"neutron-db-create-h9hxv\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.112880 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/682a86ca-28d3-4309-95a2-80458ddd5e31-operator-scripts\") pod \"cinder-1ec3-account-create-update-q4lwl\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.118254 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-combined-ca-bundle\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.118506 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-config-data\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.133206 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4b2h\" (UniqueName: \"kubernetes.io/projected/682a86ca-28d3-4309-95a2-80458ddd5e31-kube-api-access-m4b2h\") pod \"cinder-1ec3-account-create-update-q4lwl\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.147216 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpz9b\" (UniqueName: \"kubernetes.io/projected/c8f364e0-b915-4bdc-969a-469119a78a2d-kube-api-access-lpz9b\") pod \"keystone-db-sync-rnz7c\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.184012 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.212130 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-operator-scripts\") pod \"neutron-db-create-h9hxv\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.212171 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-operator-scripts\") pod \"barbican-2e8b-account-create-update-nl2l7\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.212211 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/823cdc27-b1a5-4032-8081-3d0466e6a40c-operator-scripts\") pod \"neutron-eb5f-account-create-update-x72h6\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.212263 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f9qcg\" (UniqueName: \"kubernetes.io/projected/823cdc27-b1a5-4032-8081-3d0466e6a40c-kube-api-access-f9qcg\") pod \"neutron-eb5f-account-create-update-x72h6\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.212286 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2xhd6\" (UniqueName: \"kubernetes.io/projected/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-kube-api-access-2xhd6\") pod \"neutron-db-create-h9hxv\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.212338 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tbqlq\" (UniqueName: \"kubernetes.io/projected/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-kube-api-access-tbqlq\") pod \"barbican-2e8b-account-create-update-nl2l7\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.213475 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-operator-scripts\") pod \"neutron-db-create-h9hxv\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.213935 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-operator-scripts\") pod \"barbican-2e8b-account-create-update-nl2l7\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.214372 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/823cdc27-b1a5-4032-8081-3d0466e6a40c-operator-scripts\") pod \"neutron-eb5f-account-create-update-x72h6\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.234921 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9qcg\" (UniqueName: \"kubernetes.io/projected/823cdc27-b1a5-4032-8081-3d0466e6a40c-kube-api-access-f9qcg\") pod \"neutron-eb5f-account-create-update-x72h6\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.244804 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.247550 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbqlq\" (UniqueName: \"kubernetes.io/projected/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-kube-api-access-tbqlq\") pod \"barbican-2e8b-account-create-update-nl2l7\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.248325 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.249119 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xhd6\" (UniqueName: \"kubernetes.io/projected/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-kube-api-access-2xhd6\") pod \"neutron-db-create-h9hxv\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.313673 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.321122 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.341619 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.359639 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:03 crc kubenswrapper[3552]: I0320 15:47:03.374819 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:05 crc kubenswrapper[3552]: I0320 15:47:05.657296 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:47:05 crc kubenswrapper[3552]: I0320 15:47:05.658273 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="thanos-sidecar" containerID="cri-o://c1d4c75bf19af62f45bb4656f9d7e8658ba3117f1ec6d6f034936a05bde6c2c0" gracePeriod=600 Mar 20 15:47:05 crc kubenswrapper[3552]: I0320 15:47:05.658470 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="config-reloader" containerID="cri-o://c5e2d8664254019df8337109c1dd12c7ed02482d86963cca3b2caebd193ffe36" gracePeriod=600 Mar 20 15:47:05 crc kubenswrapper[3552]: I0320 15:47:05.658572 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="prometheus" containerID="cri-o://da3b9b6c4eda1da2baa0ef4ba5735cef89fda87e7a23f34e15ac57d956e68309" gracePeriod=600 Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.176558 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf-config-lh844" Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.271693 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snvwj\" (UniqueName: \"kubernetes.io/projected/cc16d66d-67d5-428b-95e6-722f16a59ee6-kube-api-access-snvwj\") pod \"cc16d66d-67d5-428b-95e6-722f16a59ee6\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.272005 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-log-ovn\") pod \"cc16d66d-67d5-428b-95e6-722f16a59ee6\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.272028 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-scripts\") pod \"cc16d66d-67d5-428b-95e6-722f16a59ee6\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.272052 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run\") pod \"cc16d66d-67d5-428b-95e6-722f16a59ee6\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.272071 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-additional-scripts\") pod \"cc16d66d-67d5-428b-95e6-722f16a59ee6\" (UID: \"cc16d66d-67d5-428b-95e6-722f16a59ee6\") " Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.272096 3552 
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.272570 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "cc16d66d-67d5-428b-95e6-722f16a59ee6" (UID: "cc16d66d-67d5-428b-95e6-722f16a59ee6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.273509 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "cc16d66d-67d5-428b-95e6-722f16a59ee6" (UID: "cc16d66d-67d5-428b-95e6-722f16a59ee6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.273546 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run" (OuterVolumeSpecName: "var-run") pod "cc16d66d-67d5-428b-95e6-722f16a59ee6" (UID: "cc16d66d-67d5-428b-95e6-722f16a59ee6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.274014 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "cc16d66d-67d5-428b-95e6-722f16a59ee6" (UID: "cc16d66d-67d5-428b-95e6-722f16a59ee6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.278049 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc16d66d-67d5-428b-95e6-722f16a59ee6-kube-api-access-snvwj" (OuterVolumeSpecName: "kube-api-access-snvwj") pod "cc16d66d-67d5-428b-95e6-722f16a59ee6" (UID: "cc16d66d-67d5-428b-95e6-722f16a59ee6"). InnerVolumeSpecName "kube-api-access-snvwj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.280189 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-scripts" (OuterVolumeSpecName: "scripts") pod "cc16d66d-67d5-428b-95e6-722f16a59ee6" (UID: "cc16d66d-67d5-428b-95e6-722f16a59ee6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.375473 3552 reconciler_common.go:300] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-log-ovn\") on node \"crc\" DevicePath \"\""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.375514 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.375532 3552 reconciler_common.go:300] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run\") on node \"crc\" DevicePath \"\""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.375543 3552 reconciler_common.go:300] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/cc16d66d-67d5-428b-95e6-722f16a59ee6-additional-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.375552 3552 reconciler_common.go:300] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc16d66d-67d5-428b-95e6-722f16a59ee6-var-run-ovn\") on node \"crc\" DevicePath \"\""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.375579 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-snvwj\" (UniqueName: \"kubernetes.io/projected/cc16d66d-67d5-428b-95e6-722f16a59ee6-kube-api-access-snvwj\") on node \"crc\" DevicePath \"\""
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.457525 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf-config-lh844" event={"ID":"cc16d66d-67d5-428b-95e6-722f16a59ee6","Type":"ContainerDied","Data":"41139f3cb87301365726444b22702acd7e393a31ab5008c8d86a58ea67900ca4"}
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.457549 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf-config-lh844"
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.457567 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41139f3cb87301365726444b22702acd7e393a31ab5008c8d86a58ea67900ca4"
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.460345 3552 generic.go:334] "Generic (PLEG): container finished" podID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerID="c1d4c75bf19af62f45bb4656f9d7e8658ba3117f1ec6d6f034936a05bde6c2c0" exitCode=0
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.460618 3552 generic.go:334] "Generic (PLEG): container finished" podID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerID="c5e2d8664254019df8337109c1dd12c7ed02482d86963cca3b2caebd193ffe36" exitCode=0
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.460631 3552 generic.go:334] "Generic (PLEG): container finished" podID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerID="da3b9b6c4eda1da2baa0ef4ba5735cef89fda87e7a23f34e15ac57d956e68309" exitCode=0
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.460431 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerDied","Data":"c1d4c75bf19af62f45bb4656f9d7e8658ba3117f1ec6d6f034936a05bde6c2c0"}
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.460667 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerDied","Data":"c5e2d8664254019df8337109c1dd12c7ed02482d86963cca3b2caebd193ffe36"}
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.460676 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerDied","Data":"da3b9b6c4eda1da2baa0ef4ba5735cef89fda87e7a23f34e15ac57d956e68309"}
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.893868 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1ec3-account-create-update-q4lwl"]
Mar 20 15:47:06 crc kubenswrapper[3552]: I0320 15:47:06.906326 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-rnz7c"]
Mar 20 15:47:06 crc kubenswrapper[3552]: W0320 15:47:06.909445 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod682a86ca_28d3_4309_95a2_80458ddd5e31.slice/crio-9900ac0e4eb5b521f2534c7ea477871835477dbc1f3511d9c845f47dffdeece7 WatchSource:0}: Error finding container 9900ac0e4eb5b521f2534c7ea477871835477dbc1f3511d9c845f47dffdeece7: Status 404 returned error can't find the container with id 9900ac0e4eb5b521f2534c7ea477871835477dbc1f3511d9c845f47dffdeece7
Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.295453 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-c89mf-config-lh844"]
Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.310340 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-c89mf-config-lh844"]
Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.311630 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.335824 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-c89mf-config-krqw2"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.335978 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" podNamespace="openstack" podName="ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: E0320 15:47:07.336179 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="init-config-reloader" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336192 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="init-config-reloader" Mar 20 15:47:07 crc kubenswrapper[3552]: E0320 15:47:07.336213 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="prometheus" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336219 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="prometheus" Mar 20 15:47:07 crc kubenswrapper[3552]: E0320 15:47:07.336234 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="thanos-sidecar" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336240 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="thanos-sidecar" Mar 20 15:47:07 crc kubenswrapper[3552]: E0320 15:47:07.336250 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="cc16d66d-67d5-428b-95e6-722f16a59ee6" containerName="ovn-config" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336257 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc16d66d-67d5-428b-95e6-722f16a59ee6" containerName="ovn-config" Mar 20 15:47:07 crc kubenswrapper[3552]: E0320 15:47:07.336268 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="config-reloader" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336275 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="config-reloader" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336459 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="prometheus" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336477 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc16d66d-67d5-428b-95e6-722f16a59ee6" containerName="ovn-config" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336488 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="thanos-sidecar" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.336497 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" containerName="config-reloader" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.337028 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.339513 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.353926 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c89mf-config-krqw2"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.367733 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-tpfhd"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.397908 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-tls-assets\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.397952 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.397985 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config-out\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.398019 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-1\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.398085 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-2\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.398231 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.398268 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-web-config\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.398288 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n8k6\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-kube-api-access-2n8k6\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 
15:47:07.398383 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-0\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.398429 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-thanos-prometheus-http-client-file\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.399976 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.406552 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.408617 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.409175 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.409533 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.415081 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-kube-api-access-2n8k6" (OuterVolumeSpecName: "kube-api-access-2n8k6") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "kube-api-access-2n8k6". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.418573 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config" (OuterVolumeSpecName: "config") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.422647 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config-out" (OuterVolumeSpecName: "config-out") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.447132 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc16d66d-67d5-428b-95e6-722f16a59ee6" path="/var/lib/kubelet/pods/cc16d66d-67d5-428b-95e6-722f16a59ee6/volumes" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.466670 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-web-config" (OuterVolumeSpecName: "web-config") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.467037 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-77zr7"] Mar 20 15:47:07 crc kubenswrapper[3552]: W0320 15:47:07.473519 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97cb48d9_1b5c_45e8_b8e7_a30354047d3d.slice/crio-c44b6ce3a558ce60144e96ca089c5cfe27bb022e1b4ebec7fca3d1421b5c73b0 WatchSource:0}: Error finding container c44b6ce3a558ce60144e96ca089c5cfe27bb022e1b4ebec7fca3d1421b5c73b0: Status 404 returned error can't find the container with id c44b6ce3a558ce60144e96ca089c5cfe27bb022e1b4ebec7fca3d1421b5c73b0 Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.484706 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2e8b-account-create-update-nl2l7"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.500446 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" (UID: "fdedd79c-7b6f-4512-a434-dc6c4daf6bfb"). InnerVolumeSpecName "pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Mar 20 15:47:07 crc kubenswrapper[3552]: E0320 15:47:07.505440 3552 reconciler_common.go:169] "operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled true) for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") : UnmountVolume.NewUnmounter failed for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") : kubernetes.io/csi: unmounter failed to load volume data file [/var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes/kubernetes.io~csi/pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d/mount]: kubernetes.io/csi: failed to open volume data file [/var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes/kubernetes.io~csi/pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d/vol_data.json]: open /var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes/kubernetes.io~csi/pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d/vol_data.json: no such file or directory" err="UnmountVolume.NewUnmounter failed for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\" (UID: \"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb\") : kubernetes.io/csi: unmounter failed to load volume data file [/var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes/kubernetes.io~csi/pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d/mount]: kubernetes.io/csi: failed to open volume data file [/var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes/kubernetes.io~csi/pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d/vol_data.json]: open /var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes/kubernetes.io~csi/pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d/vol_data.json: no such file or directory" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506183 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-scripts\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506215 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4ntxp" event={"ID":"e04aaab3-651f-4523-ab35-250a33f54f4d","Type":"ContainerStarted","Data":"935c10c4456ee8e387170ebbb81b8702783462a33b6dfd6c23a114af7a4b915e"} Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506229 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run-ovn\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506483 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-additional-scripts\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506524 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-log-ovn\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506580 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506751 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcnrw\" (UniqueName: \"kubernetes.io/projected/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-kube-api-access-xcnrw\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506893 3552 reconciler_common.go:300] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.506915 3552 reconciler_common.go:300] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.507643 3552 reconciler_common.go:300] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-tls-assets\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.507999 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.508018 3552 reconciler_common.go:300] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-config-out\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.508036 3552 reconciler_common.go:300] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.508058 3552 reconciler_common.go:300] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.509209 
3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") on node \"crc\" " Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.509232 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2n8k6\" (UniqueName: \"kubernetes.io/projected/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-kube-api-access-2n8k6\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.509250 3552 reconciler_common.go:300] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb-web-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.511034 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1ec3-account-create-update-q4lwl" event={"ID":"682a86ca-28d3-4309-95a2-80458ddd5e31","Type":"ContainerStarted","Data":"7055113e467cd08a20577eb913ad8a320e9a8311e69fbe9f950325829d9b540c"} Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.511060 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1ec3-account-create-update-q4lwl" event={"ID":"682a86ca-28d3-4309-95a2-80458ddd5e31","Type":"ContainerStarted","Data":"9900ac0e4eb5b521f2534c7ea477871835477dbc1f3511d9c845f47dffdeece7"} Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.515063 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-h9hxv"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.575766 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fdedd79c-7b6f-4512-a434-dc6c4daf6bfb","Type":"ContainerDied","Data":"d35b3d71b97b1531105c59d9b650be88a7046ba4b8e14941bc4112f9b0affae6"} Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.575820 3552 scope.go:117] "RemoveContainer" containerID="c1d4c75bf19af62f45bb4656f9d7e8658ba3117f1ec6d6f034936a05bde6c2c0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.576026 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.598606 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-t949z"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.603739 3552 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
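The entries above trace the CSI teardown path for prometheus-metric-storage-0: the unmounter cannot load vol_data.json for pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d (the pod directory was already cleaned), and UnmountDevice then succeeds trivially because the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME capability. A minimal Go sketch for pulling the volume-teardown events out of a log like this one; the program name and the regular expression are illustrative assumptions about the log text shown here, not any kubelet interface:

    // unmount_grep.go: illustrative helper, not part of kubelet. Scans a
    // kubelet log on stdin for "UnmountVolume.TearDown succeeded" entries
    // like the ones above and prints pod UID, volume name, and plugin path.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    func main() {
        // Assumed entry shape (copied from the log above):
        //   UnmountVolume.TearDown succeeded for volume "<plugin path>"
        //   (OuterVolumeSpecName: "<name>") pod "<uid>" ...
        re := regexp.MustCompile(`UnmountVolume\.TearDown succeeded for volume "([^"]+)" \(OuterVolumeSpecName: "([^"]+)"\) pod "([^"]+)"`)
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet lines are long
        for sc.Scan() {
            for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
                fmt.Printf("pod=%s volume=%s plugin=%s\n", m[3], m[2], m[1])
            }
        }
        if err := sc.Err(); err != nil {
            fmt.Fprintln(os.Stderr, "scan:", err)
        }
    }

Run as `go run unmount_grep.go < kubelet.log`; against the entries above it would list the six ovn-controller-c89mf-config-lh844 volumes and the prometheus volumes torn down for pod fdedd79c-7b6f-4512-a434-dc6c4daf6bfb.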
Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.603876 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d") on node "crc" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.603944 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-eb5f-account-create-update-x72h6"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.608305 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tpfhd" event={"ID":"c4a0b1bc-7138-4e84-b566-86bbe003cd2c","Type":"ContainerStarted","Data":"f7867b7301ea77bea678bc1fc70692081130f82f9e609fcdf1eb360f8356f43a"} Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.610463 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-db-sync-4ntxp" podStartSLOduration=10.714722986 podStartE2EDuration="34.610415343s" podCreationTimestamp="2026-03-20 15:46:33 +0000 UTC" firstStartedPulling="2026-03-20 15:46:42.397185699 +0000 UTC m=+1302.090882529" lastFinishedPulling="2026-03-20 15:47:06.292878056 +0000 UTC m=+1325.986574886" observedRunningTime="2026-03-20 15:47:07.543350078 +0000 UTC m=+1327.237046908" watchObservedRunningTime="2026-03-20 15:47:07.610415343 +0000 UTC m=+1327.304112173" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.610764 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-additional-scripts\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.610804 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-log-ovn\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.610834 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.610937 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xcnrw\" (UniqueName: \"kubernetes.io/projected/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-kube-api-access-xcnrw\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.611002 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-scripts\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.611032 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"var-run-ovn\" 
(UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run-ovn\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.611083 3552 reconciler_common.go:300] "Volume detached for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.611331 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run-ovn\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.611980 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-additional-scripts\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.612035 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-log-ovn\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.612069 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: W0320 15:47:07.613678 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd24d70f_864e_4803_8e8c_9d9e5aadfa84.slice/crio-fad7ab66209e7fa71f5a4982cde1944c3391425249904d1a6b56e80f6d60c032 WatchSource:0}: Error finding container fad7ab66209e7fa71f5a4982cde1944c3391425249904d1a6b56e80f6d60c032: Status 404 returned error can't find the container with id fad7ab66209e7fa71f5a4982cde1944c3391425249904d1a6b56e80f6d60c032 Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.616358 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-scripts\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.620337 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-rnz7c" event={"ID":"c8f364e0-b915-4bdc-969a-469119a78a2d","Type":"ContainerStarted","Data":"3785f36a9476dc333a8cefc2b792721ce5db86495cbb2789edc8626ee34a5c87"} Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.646038 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcnrw\" (UniqueName: 
\"kubernetes.io/projected/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-kube-api-access-xcnrw\") pod \"ovn-controller-c89mf-config-krqw2\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.654693 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.669621 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.671251 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/cinder-1ec3-account-create-update-q4lwl" podStartSLOduration=5.6712092 podStartE2EDuration="5.6712092s" podCreationTimestamp="2026-03-20 15:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:07.592623727 +0000 UTC m=+1327.286320567" watchObservedRunningTime="2026-03-20 15:47:07.6712092 +0000 UTC m=+1327.364906030" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.735312 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.743718 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.753078 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.753317 3552 topology_manager.go:215] "Topology Admit Handler" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" podNamespace="openstack" podName="prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.763569 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.764054 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.770663 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-s2p59" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.771660 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.771824 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.771960 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.772388 3552 scope.go:117] "RemoveContainer" containerID="c5e2d8664254019df8337109c1dd12c7ed02482d86963cca3b2caebd193ffe36" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.775773 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.775907 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.776055 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.776154 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.790141 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.939869 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940029 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940071 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940245 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 
crc kubenswrapper[3552]: I0320 15:47:07.940300 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940330 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940369 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940395 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpxvj\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-kube-api-access-bpxvj\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940432 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940457 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940484 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940510 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: 
\"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.940539 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:07 crc kubenswrapper[3552]: I0320 15:47:07.979467 3552 scope.go:117] "RemoveContainer" containerID="da3b9b6c4eda1da2baa0ef4ba5735cef89fda87e7a23f34e15ac57d956e68309" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042265 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042320 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042359 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042390 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042447 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042480 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042519 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042547 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042584 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042609 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bpxvj\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-kube-api-access-bpxvj\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042632 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042657 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.042681 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.048360 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.048737 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.049145 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" 
(UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.049471 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.048458 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.049611 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.050686 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.051369 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.054180 3552 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.054226 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c127805bcb575cbe31260fab01e798009882e8c5a15f13517ae73f993a199ea5/globalmount\"" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.055693 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.059742 3552 scope.go:117] "RemoveContainer" containerID="8b1aca7237e1fe93905bf1dba4286ca6f021a7ed41f1700eac9be0ffda92d03d" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.059912 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.063553 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpxvj\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-kube-api-access-bpxvj\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.065902 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.169121 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") " pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.173998 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.467893 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c89mf-config-krqw2"] Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.662481 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"fad7ab66209e7fa71f5a4982cde1944c3391425249904d1a6b56e80f6d60c032"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.668869 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf-config-krqw2" event={"ID":"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab","Type":"ContainerStarted","Data":"abefba791c94ea0a0ea965ca696600147b8fef7ee209b6a1775fe63fe316d4c1"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.675418 3552 generic.go:334] "Generic (PLEG): container finished" podID="c4a0b1bc-7138-4e84-b566-86bbe003cd2c" containerID="23a3d250d223953e770ae5ebd80c62e8fd019941008d2385e4dd5cc15af2210c" exitCode=0 Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.675595 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tpfhd" event={"ID":"c4a0b1bc-7138-4e84-b566-86bbe003cd2c","Type":"ContainerDied","Data":"23a3d250d223953e770ae5ebd80c62e8fd019941008d2385e4dd5cc15af2210c"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.679836 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2e8b-account-create-update-nl2l7" event={"ID":"12bbf46a-6ef5-49fb-98c2-dd0e48affa63","Type":"ContainerStarted","Data":"309a7f8f52133ac9bff7c70cd77d3902694211b4c28b187f526dd5afc5ed21d0"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.679890 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2e8b-account-create-update-nl2l7" event={"ID":"12bbf46a-6ef5-49fb-98c2-dd0e48affa63","Type":"ContainerStarted","Data":"012baf7d716cfbdea4922c2e94bbd93e901df4cf69cc28c637c419d020828b55"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.684233 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-t949z" event={"ID":"97cb48d9-1b5c-45e8-b8e7-a30354047d3d","Type":"ContainerStarted","Data":"c44b6ce3a558ce60144e96ca089c5cfe27bb022e1b4ebec7fca3d1421b5c73b0"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.687313 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-h9hxv" event={"ID":"c88f4219-6cb4-4fd9-8414-239ef9a7c25e","Type":"ContainerStarted","Data":"7722e7a3f95ddabd6ae756fa839f757e13141bfa04a46738a3449bc3bcd308bb"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.687352 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-h9hxv" event={"ID":"c88f4219-6cb4-4fd9-8414-239ef9a7c25e","Type":"ContainerStarted","Data":"aa18ff0de9adf30145838db4be008b6d91675076271d43c3d5f756e094f5a497"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.720552 3552 generic.go:334] "Generic (PLEG): container finished" podID="682a86ca-28d3-4309-95a2-80458ddd5e31" containerID="7055113e467cd08a20577eb913ad8a320e9a8311e69fbe9f950325829d9b540c" exitCode=0 Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.720633 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1ec3-account-create-update-q4lwl" 
event={"ID":"682a86ca-28d3-4309-95a2-80458ddd5e31","Type":"ContainerDied","Data":"7055113e467cd08a20577eb913ad8a320e9a8311e69fbe9f950325829d9b540c"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.724970 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-eb5f-account-create-update-x72h6" event={"ID":"823cdc27-b1a5-4032-8081-3d0466e6a40c","Type":"ContainerStarted","Data":"b426825943a654d5621b1a0be0a6b9382b87b477a65f0d8db585fd124aa352c5"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.725009 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-eb5f-account-create-update-x72h6" event={"ID":"823cdc27-b1a5-4032-8081-3d0466e6a40c","Type":"ContainerStarted","Data":"bf15f9ceebcbe98e84f6691f5b377d0b71250f659f9e07d8e5046d20ef12d21b"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.736421 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-db-create-h9hxv" podStartSLOduration=6.736358532 podStartE2EDuration="6.736358532s" podCreationTimestamp="2026-03-20 15:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:08.719444729 +0000 UTC m=+1328.413141569" watchObservedRunningTime="2026-03-20 15:47:08.736358532 +0000 UTC m=+1328.430055362" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.755084 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-77zr7" event={"ID":"148d3ae9-f212-412a-b4a3-95c60681e8e3","Type":"ContainerStarted","Data":"dd2c09578afe8ba913bd03bf8efe6ec82714d6f37e2c04ddc41d04b40327a0d2"} Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.818227 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-eb5f-account-create-update-x72h6" podStartSLOduration=6.818156611 podStartE2EDuration="6.818156611s" podCreationTimestamp="2026-03-20 15:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:08.811732119 +0000 UTC m=+1328.505428969" watchObservedRunningTime="2026-03-20 15:47:08.818156611 +0000 UTC m=+1328.511853461" Mar 20 15:47:08 crc kubenswrapper[3552]: I0320 15:47:08.882541 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 15:47:08 crc kubenswrapper[3552]: E0320 15:47:08.887791 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4a0b1bc_7138_4e84_b566_86bbe003cd2c.slice/crio-conmon-23a3d250d223953e770ae5ebd80c62e8fd019941008d2385e4dd5cc15af2210c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4a0b1bc_7138_4e84_b566_86bbe003cd2c.slice/crio-23a3d250d223953e770ae5ebd80c62e8fd019941008d2385e4dd5cc15af2210c.scope\": RecentStats: unable to find data in memory cache]" Mar 20 15:47:08 crc kubenswrapper[3552]: W0320 15:47:08.920780 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96d21e55_2bd4_4fbe_b8fb_4f33165bd3ae.slice/crio-5c8abc493589e32ee8164e4806cd57ccbef0238ee226a3358882098934fdbcc4 WatchSource:0}: Error finding container 5c8abc493589e32ee8164e4806cd57ccbef0238ee226a3358882098934fdbcc4: Status 404 returned error can't find the container with 
id 5c8abc493589e32ee8164e4806cd57ccbef0238ee226a3358882098934fdbcc4 Mar 20 15:47:09 crc kubenswrapper[3552]: I0320 15:47:09.447607 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdedd79c-7b6f-4512-a434-dc6c4daf6bfb" path="/var/lib/kubelet/pods/fdedd79c-7b6f-4512-a434-dc6c4daf6bfb/volumes" Mar 20 15:47:09 crc kubenswrapper[3552]: I0320 15:47:09.767421 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerStarted","Data":"5c8abc493589e32ee8164e4806cd57ccbef0238ee226a3358882098934fdbcc4"} Mar 20 15:47:10 crc kubenswrapper[3552]: I0320 15:47:10.994628 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.006250 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.099354 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4b2h\" (UniqueName: \"kubernetes.io/projected/682a86ca-28d3-4309-95a2-80458ddd5e31-kube-api-access-m4b2h\") pod \"682a86ca-28d3-4309-95a2-80458ddd5e31\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.099710 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/682a86ca-28d3-4309-95a2-80458ddd5e31-operator-scripts\") pod \"682a86ca-28d3-4309-95a2-80458ddd5e31\" (UID: \"682a86ca-28d3-4309-95a2-80458ddd5e31\") " Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.099739 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2cj2\" (UniqueName: \"kubernetes.io/projected/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-kube-api-access-f2cj2\") pod \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.099810 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-operator-scripts\") pod \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\" (UID: \"c4a0b1bc-7138-4e84-b566-86bbe003cd2c\") " Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.100715 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/682a86ca-28d3-4309-95a2-80458ddd5e31-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "682a86ca-28d3-4309-95a2-80458ddd5e31" (UID: "682a86ca-28d3-4309-95a2-80458ddd5e31"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.100858 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c4a0b1bc-7138-4e84-b566-86bbe003cd2c" (UID: "c4a0b1bc-7138-4e84-b566-86bbe003cd2c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.101233 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/682a86ca-28d3-4309-95a2-80458ddd5e31-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.101254 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.107049 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/682a86ca-28d3-4309-95a2-80458ddd5e31-kube-api-access-m4b2h" (OuterVolumeSpecName: "kube-api-access-m4b2h") pod "682a86ca-28d3-4309-95a2-80458ddd5e31" (UID: "682a86ca-28d3-4309-95a2-80458ddd5e31"). InnerVolumeSpecName "kube-api-access-m4b2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.107272 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-kube-api-access-f2cj2" (OuterVolumeSpecName: "kube-api-access-f2cj2") pod "c4a0b1bc-7138-4e84-b566-86bbe003cd2c" (UID: "c4a0b1bc-7138-4e84-b566-86bbe003cd2c"). InnerVolumeSpecName "kube-api-access-f2cj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.202816 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-m4b2h\" (UniqueName: \"kubernetes.io/projected/682a86ca-28d3-4309-95a2-80458ddd5e31-kube-api-access-m4b2h\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.202859 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-f2cj2\" (UniqueName: \"kubernetes.io/projected/c4a0b1bc-7138-4e84-b566-86bbe003cd2c-kube-api-access-f2cj2\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.783653 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tpfhd" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.783656 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tpfhd" event={"ID":"c4a0b1bc-7138-4e84-b566-86bbe003cd2c","Type":"ContainerDied","Data":"f7867b7301ea77bea678bc1fc70692081130f82f9e609fcdf1eb360f8356f43a"} Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.783841 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7867b7301ea77bea678bc1fc70692081130f82f9e609fcdf1eb360f8356f43a" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.785287 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-t949z" event={"ID":"97cb48d9-1b5c-45e8-b8e7-a30354047d3d","Type":"ContainerStarted","Data":"e2ed61726af561dafe4831dea479f72e5df2afcdc85ff550a691541feb211048"} Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.787200 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-1ec3-account-create-update-q4lwl" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.787313 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1ec3-account-create-update-q4lwl" event={"ID":"682a86ca-28d3-4309-95a2-80458ddd5e31","Type":"ContainerDied","Data":"9900ac0e4eb5b521f2534c7ea477871835477dbc1f3511d9c845f47dffdeece7"} Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.787353 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9900ac0e4eb5b521f2534c7ea477871835477dbc1f3511d9c845f47dffdeece7" Mar 20 15:47:11 crc kubenswrapper[3552]: I0320 15:47:11.839474 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/barbican-2e8b-account-create-update-nl2l7" podStartSLOduration=9.839346259 podStartE2EDuration="9.839346259s" podCreationTimestamp="2026-03-20 15:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:11.831119299 +0000 UTC m=+1331.524816139" watchObservedRunningTime="2026-03-20 15:47:11.839346259 +0000 UTC m=+1331.533043089" Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.807392 3552 generic.go:334] "Generic (PLEG): container finished" podID="12bbf46a-6ef5-49fb-98c2-dd0e48affa63" containerID="309a7f8f52133ac9bff7c70cd77d3902694211b4c28b187f526dd5afc5ed21d0" exitCode=0 Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.807580 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2e8b-account-create-update-nl2l7" event={"ID":"12bbf46a-6ef5-49fb-98c2-dd0e48affa63","Type":"ContainerDied","Data":"309a7f8f52133ac9bff7c70cd77d3902694211b4c28b187f526dd5afc5ed21d0"} Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.810931 3552 generic.go:334] "Generic (PLEG): container finished" podID="97cb48d9-1b5c-45e8-b8e7-a30354047d3d" containerID="e2ed61726af561dafe4831dea479f72e5df2afcdc85ff550a691541feb211048" exitCode=0 Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.811098 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-t949z" event={"ID":"97cb48d9-1b5c-45e8-b8e7-a30354047d3d","Type":"ContainerDied","Data":"e2ed61726af561dafe4831dea479f72e5df2afcdc85ff550a691541feb211048"} Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.813515 3552 generic.go:334] "Generic (PLEG): container finished" podID="c88f4219-6cb4-4fd9-8414-239ef9a7c25e" containerID="7722e7a3f95ddabd6ae756fa839f757e13141bfa04a46738a3449bc3bcd308bb" exitCode=0 Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.813661 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-h9hxv" event={"ID":"c88f4219-6cb4-4fd9-8414-239ef9a7c25e","Type":"ContainerDied","Data":"7722e7a3f95ddabd6ae756fa839f757e13141bfa04a46738a3449bc3bcd308bb"} Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.817783 3552 generic.go:334] "Generic (PLEG): container finished" podID="e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" containerID="0347dbb00e01a8a90af7702baeab73fc3bd47a1218381f6fe48198f266e1ac2e" exitCode=0 Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.817897 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf-config-krqw2" event={"ID":"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab","Type":"ContainerDied","Data":"0347dbb00e01a8a90af7702baeab73fc3bd47a1218381f6fe48198f266e1ac2e"} Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.822808 3552 
generic.go:334] "Generic (PLEG): container finished" podID="823cdc27-b1a5-4032-8081-3d0466e6a40c" containerID="b426825943a654d5621b1a0be0a6b9382b87b477a65f0d8db585fd124aa352c5" exitCode=0 Mar 20 15:47:12 crc kubenswrapper[3552]: I0320 15:47:12.822963 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-eb5f-account-create-update-x72h6" event={"ID":"823cdc27-b1a5-4032-8081-3d0466e6a40c","Type":"ContainerDied","Data":"b426825943a654d5621b1a0be0a6b9382b87b477a65f0d8db585fd124aa352c5"} Mar 20 15:47:13 crc kubenswrapper[3552]: I0320 15:47:13.834928 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerStarted","Data":"70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f"} Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.527873 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.583095 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/823cdc27-b1a5-4032-8081-3d0466e6a40c-operator-scripts\") pod \"823cdc27-b1a5-4032-8081-3d0466e6a40c\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.583175 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9qcg\" (UniqueName: \"kubernetes.io/projected/823cdc27-b1a5-4032-8081-3d0466e6a40c-kube-api-access-f9qcg\") pod \"823cdc27-b1a5-4032-8081-3d0466e6a40c\" (UID: \"823cdc27-b1a5-4032-8081-3d0466e6a40c\") " Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.583741 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/823cdc27-b1a5-4032-8081-3d0466e6a40c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "823cdc27-b1a5-4032-8081-3d0466e6a40c" (UID: "823cdc27-b1a5-4032-8081-3d0466e6a40c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.606662 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/823cdc27-b1a5-4032-8081-3d0466e6a40c-kube-api-access-f9qcg" (OuterVolumeSpecName: "kube-api-access-f9qcg") pod "823cdc27-b1a5-4032-8081-3d0466e6a40c" (UID: "823cdc27-b1a5-4032-8081-3d0466e6a40c"). InnerVolumeSpecName "kube-api-access-f9qcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.685363 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/823cdc27-b1a5-4032-8081-3d0466e6a40c-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.685700 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-f9qcg\" (UniqueName: \"kubernetes.io/projected/823cdc27-b1a5-4032-8081-3d0466e6a40c-kube-api-access-f9qcg\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.847394 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-eb5f-account-create-update-x72h6" event={"ID":"823cdc27-b1a5-4032-8081-3d0466e6a40c","Type":"ContainerDied","Data":"bf15f9ceebcbe98e84f6691f5b377d0b71250f659f9e07d8e5046d20ef12d21b"} Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.847440 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf15f9ceebcbe98e84f6691f5b377d0b71250f659f9e07d8e5046d20ef12d21b" Mar 20 15:47:15 crc kubenswrapper[3552]: I0320 15:47:15.847483 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-eb5f-account-create-update-x72h6" Mar 20 15:47:17 crc kubenswrapper[3552]: I0320 15:47:17.863115 3552 generic.go:334] "Generic (PLEG): container finished" podID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerID="70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f" exitCode=0 Mar 20 15:47:17 crc kubenswrapper[3552]: I0320 15:47:17.863241 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerDied","Data":"70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f"} Mar 20 15:47:19 crc kubenswrapper[3552]: I0320 15:47:19.909930 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.061207 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbqlq\" (UniqueName: \"kubernetes.io/projected/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-kube-api-access-tbqlq\") pod \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.061302 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-operator-scripts\") pod \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\" (UID: \"12bbf46a-6ef5-49fb-98c2-dd0e48affa63\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.062305 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "12bbf46a-6ef5-49fb-98c2-dd0e48affa63" (UID: "12bbf46a-6ef5-49fb-98c2-dd0e48affa63"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.066587 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-kube-api-access-tbqlq" (OuterVolumeSpecName: "kube-api-access-tbqlq") pod "12bbf46a-6ef5-49fb-98c2-dd0e48affa63" (UID: "12bbf46a-6ef5-49fb-98c2-dd0e48affa63"). InnerVolumeSpecName "kube-api-access-tbqlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.163093 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-tbqlq\" (UniqueName: \"kubernetes.io/projected/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-kube-api-access-tbqlq\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.163128 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12bbf46a-6ef5-49fb-98c2-dd0e48affa63-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.556164 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.565718 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-t949z" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.573473 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.669893 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvmvc\" (UniqueName: \"kubernetes.io/projected/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-kube-api-access-lvmvc\") pod \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.670133 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-operator-scripts\") pod \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\" (UID: \"97cb48d9-1b5c-45e8-b8e7-a30354047d3d\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.670222 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-operator-scripts\") pod \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.670274 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xhd6\" (UniqueName: \"kubernetes.io/projected/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-kube-api-access-2xhd6\") pod \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\" (UID: \"c88f4219-6cb4-4fd9-8414-239ef9a7c25e\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.670762 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "97cb48d9-1b5c-45e8-b8e7-a30354047d3d" (UID: "97cb48d9-1b5c-45e8-b8e7-a30354047d3d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.671306 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c88f4219-6cb4-4fd9-8414-239ef9a7c25e" (UID: "c88f4219-6cb4-4fd9-8414-239ef9a7c25e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.675147 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-kube-api-access-2xhd6" (OuterVolumeSpecName: "kube-api-access-2xhd6") pod "c88f4219-6cb4-4fd9-8414-239ef9a7c25e" (UID: "c88f4219-6cb4-4fd9-8414-239ef9a7c25e"). InnerVolumeSpecName "kube-api-access-2xhd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.675416 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-kube-api-access-lvmvc" (OuterVolumeSpecName: "kube-api-access-lvmvc") pod "97cb48d9-1b5c-45e8-b8e7-a30354047d3d" (UID: "97cb48d9-1b5c-45e8-b8e7-a30354047d3d"). InnerVolumeSpecName "kube-api-access-lvmvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.771635 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-log-ovn\") pod \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.771793 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" (UID: "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772170 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-scripts\") pod \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772299 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run-ovn\") pod \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772372 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" (UID: "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772442 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run\") pod \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772500 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run" (OuterVolumeSpecName: "var-run") pod "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" (UID: "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772604 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcnrw\" (UniqueName: \"kubernetes.io/projected/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-kube-api-access-xcnrw\") pod \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.772682 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-additional-scripts\") pod \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\" (UID: \"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab\") " Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.773271 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" (UID: "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.773479 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-scripts" (OuterVolumeSpecName: "scripts") pod "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" (UID: "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775234 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-kube-api-access-xcnrw" (OuterVolumeSpecName: "kube-api-access-xcnrw") pod "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" (UID: "e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab"). InnerVolumeSpecName "kube-api-access-xcnrw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775620 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775675 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-xcnrw\" (UniqueName: \"kubernetes.io/projected/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-kube-api-access-xcnrw\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775692 3552 reconciler_common.go:300] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-additional-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775706 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775719 3552 reconciler_common.go:300] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-log-ovn\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775733 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2xhd6\" (UniqueName: \"kubernetes.io/projected/c88f4219-6cb4-4fd9-8414-239ef9a7c25e-kube-api-access-2xhd6\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775747 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775764 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-lvmvc\" (UniqueName: \"kubernetes.io/projected/97cb48d9-1b5c-45e8-b8e7-a30354047d3d-kube-api-access-lvmvc\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775776 3552 reconciler_common.go:300] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run-ovn\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.775788 3552 reconciler_common.go:300] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab-var-run\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.894117 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c89mf-config-krqw2" event={"ID":"e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab","Type":"ContainerDied","Data":"abefba791c94ea0a0ea965ca696600147b8fef7ee209b6a1775fe63fe316d4c1"} Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.894168 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abefba791c94ea0a0ea965ca696600147b8fef7ee209b6a1775fe63fe316d4c1" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.894278 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c89mf-config-krqw2" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.911307 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2e8b-account-create-update-nl2l7" event={"ID":"12bbf46a-6ef5-49fb-98c2-dd0e48affa63","Type":"ContainerDied","Data":"012baf7d716cfbdea4922c2e94bbd93e901df4cf69cc28c637c419d020828b55"} Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.911419 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2e8b-account-create-update-nl2l7" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.911958 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="012baf7d716cfbdea4922c2e94bbd93e901df4cf69cc28c637c419d020828b55" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.921676 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-t949z" event={"ID":"97cb48d9-1b5c-45e8-b8e7-a30354047d3d","Type":"ContainerDied","Data":"c44b6ce3a558ce60144e96ca089c5cfe27bb022e1b4ebec7fca3d1421b5c73b0"} Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.921708 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c44b6ce3a558ce60144e96ca089c5cfe27bb022e1b4ebec7fca3d1421b5c73b0" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.921769 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-t949z" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.928600 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-h9hxv" event={"ID":"c88f4219-6cb4-4fd9-8414-239ef9a7c25e","Type":"ContainerDied","Data":"aa18ff0de9adf30145838db4be008b6d91675076271d43c3d5f756e094f5a497"} Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.928633 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa18ff0de9adf30145838db4be008b6d91675076271d43c3d5f756e094f5a497" Mar 20 15:47:20 crc kubenswrapper[3552]: I0320 15:47:20.928687 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-h9hxv" Mar 20 15:47:21 crc kubenswrapper[3552]: I0320 15:47:21.677631 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-c89mf-config-krqw2"] Mar 20 15:47:21 crc kubenswrapper[3552]: I0320 15:47:21.687508 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-c89mf-config-krqw2"] Mar 20 15:47:21 crc kubenswrapper[3552]: I0320 15:47:21.946938 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerStarted","Data":"9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6"} Mar 20 15:47:22 crc kubenswrapper[3552]: I0320 15:47:22.960952 3552 generic.go:334] "Generic (PLEG): container finished" podID="e04aaab3-651f-4523-ab35-250a33f54f4d" containerID="935c10c4456ee8e387170ebbb81b8702783462a33b6dfd6c23a114af7a4b915e" exitCode=0 Mar 20 15:47:22 crc kubenswrapper[3552]: I0320 15:47:22.960999 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4ntxp" event={"ID":"e04aaab3-651f-4523-ab35-250a33f54f4d","Type":"ContainerDied","Data":"935c10c4456ee8e387170ebbb81b8702783462a33b6dfd6c23a114af7a4b915e"} Mar 20 15:47:23 crc kubenswrapper[3552]: I0320 15:47:23.440881 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" path="/var/lib/kubelet/pods/e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab/volumes" Mar 20 15:47:26 crc kubenswrapper[3552]: I0320 15:47:26.995042 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-4ntxp" event={"ID":"e04aaab3-651f-4523-ab35-250a33f54f4d","Type":"ContainerDied","Data":"2d878f79baeb09c9f756b98fae12d8686584e698e342aa5c2260927243126da5"} Mar 20 15:47:26 crc kubenswrapper[3552]: I0320 15:47:26.995687 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d878f79baeb09c9f756b98fae12d8686584e698e342aa5c2260927243126da5" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.084337 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-4ntxp" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.189980 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-combined-ca-bundle\") pod \"e04aaab3-651f-4523-ab35-250a33f54f4d\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.190071 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-config-data\") pod \"e04aaab3-651f-4523-ab35-250a33f54f4d\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.190161 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g95wd\" (UniqueName: \"kubernetes.io/projected/e04aaab3-651f-4523-ab35-250a33f54f4d-kube-api-access-g95wd\") pod \"e04aaab3-651f-4523-ab35-250a33f54f4d\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.190203 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-db-sync-config-data\") pod \"e04aaab3-651f-4523-ab35-250a33f54f4d\" (UID: \"e04aaab3-651f-4523-ab35-250a33f54f4d\") " Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.194192 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e04aaab3-651f-4523-ab35-250a33f54f4d" (UID: "e04aaab3-651f-4523-ab35-250a33f54f4d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.194379 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e04aaab3-651f-4523-ab35-250a33f54f4d-kube-api-access-g95wd" (OuterVolumeSpecName: "kube-api-access-g95wd") pod "e04aaab3-651f-4523-ab35-250a33f54f4d" (UID: "e04aaab3-651f-4523-ab35-250a33f54f4d"). InnerVolumeSpecName "kube-api-access-g95wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.235827 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e04aaab3-651f-4523-ab35-250a33f54f4d" (UID: "e04aaab3-651f-4523-ab35-250a33f54f4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.269478 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-config-data" (OuterVolumeSpecName: "config-data") pod "e04aaab3-651f-4523-ab35-250a33f54f4d" (UID: "e04aaab3-651f-4523-ab35-250a33f54f4d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.292085 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.292120 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.292132 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-g95wd\" (UniqueName: \"kubernetes.io/projected/e04aaab3-651f-4523-ab35-250a33f54f4d-kube-api-access-g95wd\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:27 crc kubenswrapper[3552]: I0320 15:47:27.292142 3552 reconciler_common.go:300] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e04aaab3-651f-4523-ab35-250a33f54f4d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.018479 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"f1f6e143eaaecc8e0c6eb5042ab678aa0b6974b3faa6e7fd9a40c98b7e3d0fbb"} Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.018805 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"b9be35a60f54874ee5b7a63688506633adbc045f37242682c730488ba7f04c45"} Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.020126 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-77zr7" event={"ID":"148d3ae9-f212-412a-b4a3-95c60681e8e3","Type":"ContainerStarted","Data":"b894dcd0bf3e8492a78c9f1d16364c2e1221ed4fd909f679f92ec7792dfa6a8f"} Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.022262 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-4ntxp" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.022463 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-rnz7c" event={"ID":"c8f364e0-b915-4bdc-969a-469119a78a2d","Type":"ContainerStarted","Data":"0d23070d42c5a0c319759363cf63f21a27f12d4aeb0f461bbcac501e2bbed392"} Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.039805 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-db-sync-77zr7" podStartSLOduration=6.667619141 podStartE2EDuration="26.039760687s" podCreationTimestamp="2026-03-20 15:47:02 +0000 UTC" firstStartedPulling="2026-03-20 15:47:07.471147825 +0000 UTC m=+1327.164844655" lastFinishedPulling="2026-03-20 15:47:26.843289371 +0000 UTC m=+1346.536986201" observedRunningTime="2026-03-20 15:47:28.03651532 +0000 UTC m=+1347.730212170" watchObservedRunningTime="2026-03-20 15:47:28.039760687 +0000 UTC m=+1347.733457517" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.083452 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/keystone-db-sync-rnz7c" podStartSLOduration=6.167553346 podStartE2EDuration="26.083397105s" podCreationTimestamp="2026-03-20 15:47:02 +0000 UTC" firstStartedPulling="2026-03-20 15:47:06.930651768 +0000 UTC m=+1326.624348598" lastFinishedPulling="2026-03-20 15:47:26.846495537 +0000 UTC m=+1346.540192357" observedRunningTime="2026-03-20 15:47:28.079543152 +0000 UTC m=+1347.773239992" watchObservedRunningTime="2026-03-20 15:47:28.083397105 +0000 UTC m=+1347.777093935" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.511881 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7968dc795-kgnvn"] Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.512358 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" podNamespace="openstack" podName="dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519367 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" containerName="ovn-config" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519393 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" containerName="ovn-config" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519424 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="97cb48d9-1b5c-45e8-b8e7-a30354047d3d" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519432 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="97cb48d9-1b5c-45e8-b8e7-a30354047d3d" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519452 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="823cdc27-b1a5-4032-8081-3d0466e6a40c" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519459 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="823cdc27-b1a5-4032-8081-3d0466e6a40c" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519497 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c88f4219-6cb4-4fd9-8414-239ef9a7c25e" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519506 3552 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c88f4219-6cb4-4fd9-8414-239ef9a7c25e" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519527 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c4a0b1bc-7138-4e84-b566-86bbe003cd2c" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519538 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4a0b1bc-7138-4e84-b566-86bbe003cd2c" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519583 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="12bbf46a-6ef5-49fb-98c2-dd0e48affa63" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519592 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="12bbf46a-6ef5-49fb-98c2-dd0e48affa63" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519612 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="682a86ca-28d3-4309-95a2-80458ddd5e31" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519625 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="682a86ca-28d3-4309-95a2-80458ddd5e31" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: E0320 15:47:28.519641 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e04aaab3-651f-4523-ab35-250a33f54f4d" containerName="glance-db-sync" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.519647 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e04aaab3-651f-4523-ab35-250a33f54f4d" containerName="glance-db-sync" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520113 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="682a86ca-28d3-4309-95a2-80458ddd5e31" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520141 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e04aaab3-651f-4523-ab35-250a33f54f4d" containerName="glance-db-sync" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520167 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c88f4219-6cb4-4fd9-8414-239ef9a7c25e" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520190 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="12bbf46a-6ef5-49fb-98c2-dd0e48affa63" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520209 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4a0b1bc-7138-4e84-b566-86bbe003cd2c" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520227 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5fc0f11-9a44-4d52-a051-dfd09cc2c0ab" containerName="ovn-config" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520245 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="823cdc27-b1a5-4032-8081-3d0466e6a40c" containerName="mariadb-account-create-update" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.520268 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="97cb48d9-1b5c-45e8-b8e7-a30354047d3d" containerName="mariadb-database-create" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.521731 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.588473 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7968dc795-kgnvn"] Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.653118 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-dns-svc\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.653174 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxqgs\" (UniqueName: \"kubernetes.io/projected/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-kube-api-access-vxqgs\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.653250 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-sb\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.653429 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-nb\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.653465 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-config\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.754926 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-sb\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.754975 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-nb\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.755005 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-config\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.755066 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-dns-svc\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.755093 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vxqgs\" (UniqueName: \"kubernetes.io/projected/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-kube-api-access-vxqgs\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.756173 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-sb\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.756778 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-nb\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.757189 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-config\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.757306 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-dns-svc\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.776567 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxqgs\" (UniqueName: \"kubernetes.io/projected/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-kube-api-access-vxqgs\") pod \"dnsmasq-dns-7968dc795-kgnvn\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:28 crc kubenswrapper[3552]: I0320 15:47:28.863482 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:29 crc kubenswrapper[3552]: I0320 15:47:29.033227 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"860631158ae38f645c7d3a728ab797a3df1ad8e0295d8f7ad1c4588384edc392"} Mar 20 15:47:29 crc kubenswrapper[3552]: I0320 15:47:29.033258 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"5457f2421721bee3a190533f005a664f856b812a0da0e3ec9c62ead8aa22be77"} Mar 20 15:47:29 crc kubenswrapper[3552]: I0320 15:47:29.595707 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7968dc795-kgnvn"] Mar 20 15:47:30 crc kubenswrapper[3552]: I0320 15:47:30.041595 3552 generic.go:334] "Generic (PLEG): container finished" podID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerID="3e2a5c0bd56e03eb9248cc3f17e3e23284085c9b587749fae69b08348db85632" exitCode=0 Mar 20 15:47:30 crc kubenswrapper[3552]: I0320 15:47:30.041747 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" event={"ID":"a0e4188f-98f6-4c10-ad13-7f2061f2af9f","Type":"ContainerDied","Data":"3e2a5c0bd56e03eb9248cc3f17e3e23284085c9b587749fae69b08348db85632"} Mar 20 15:47:30 crc kubenswrapper[3552]: I0320 15:47:30.041948 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" event={"ID":"a0e4188f-98f6-4c10-ad13-7f2061f2af9f","Type":"ContainerStarted","Data":"1f163741730797c6e7f42e661bdfccd36342ecc8bd1d779d6799d909423a4dce"} Mar 20 15:47:31 crc kubenswrapper[3552]: I0320 15:47:31.092020 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" event={"ID":"a0e4188f-98f6-4c10-ad13-7f2061f2af9f","Type":"ContainerStarted","Data":"44afb2b63ac7b371659a7ca0f789d6aa3a1fb01c7c38a8272c5bb939cb3b8c5b"} Mar 20 15:47:31 crc kubenswrapper[3552]: I0320 15:47:31.127746 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" podStartSLOduration=3.127707783 podStartE2EDuration="3.127707783s" podCreationTimestamp="2026-03-20 15:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:31.125640268 +0000 UTC m=+1350.819337098" watchObservedRunningTime="2026-03-20 15:47:31.127707783 +0000 UTC m=+1350.821404613" Mar 20 15:47:31 crc kubenswrapper[3552]: I0320 15:47:31.147789 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"c4ffbd06eb572f539a400011edbd585afe0144696f583f1d00581e8d729f9c36"} Mar 20 15:47:32 crc kubenswrapper[3552]: I0320 15:47:32.159235 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"318ff718b79a493e8f63b200d6a10452ccc78db5d8cb855eeb5b68ffcbe2ca31"} Mar 20 15:47:32 crc kubenswrapper[3552]: I0320 15:47:32.159767 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"ee8ec97e84e78acdfb1b4b6f78930345a1e015ae8f32e382896868f7b4572a29"} Mar 20 15:47:32 crc kubenswrapper[3552]: I0320 
15:47:32.162418 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerStarted","Data":"e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3"} Mar 20 15:47:32 crc kubenswrapper[3552]: I0320 15:47:32.162451 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerStarted","Data":"feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3"} Mar 20 15:47:32 crc kubenswrapper[3552]: I0320 15:47:32.163262 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:32 crc kubenswrapper[3552]: I0320 15:47:32.193934 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=25.193897132 podStartE2EDuration="25.193897132s" podCreationTimestamp="2026-03-20 15:47:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:32.18860425 +0000 UTC m=+1351.882301090" watchObservedRunningTime="2026-03-20 15:47:32.193897132 +0000 UTC m=+1351.887593952" Mar 20 15:47:33 crc kubenswrapper[3552]: I0320 15:47:33.169779 3552 generic.go:334] "Generic (PLEG): container finished" podID="148d3ae9-f212-412a-b4a3-95c60681e8e3" containerID="b894dcd0bf3e8492a78c9f1d16364c2e1221ed4fd909f679f92ec7792dfa6a8f" exitCode=0 Mar 20 15:47:33 crc kubenswrapper[3552]: I0320 15:47:33.169855 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-77zr7" event={"ID":"148d3ae9-f212-412a-b4a3-95c60681e8e3","Type":"ContainerDied","Data":"b894dcd0bf3e8492a78c9f1d16364c2e1221ed4fd909f679f92ec7792dfa6a8f"} Mar 20 15:47:33 crc kubenswrapper[3552]: I0320 15:47:33.172414 3552 generic.go:334] "Generic (PLEG): container finished" podID="c8f364e0-b915-4bdc-969a-469119a78a2d" containerID="0d23070d42c5a0c319759363cf63f21a27f12d4aeb0f461bbcac501e2bbed392" exitCode=0 Mar 20 15:47:33 crc kubenswrapper[3552]: I0320 15:47:33.172551 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-rnz7c" event={"ID":"c8f364e0-b915-4bdc-969a-469119a78a2d","Type":"ContainerDied","Data":"0d23070d42c5a0c319759363cf63f21a27f12d4aeb0f461bbcac501e2bbed392"} Mar 20 15:47:33 crc kubenswrapper[3552]: I0320 15:47:33.174317 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:33 crc kubenswrapper[3552]: I0320 15:47:33.177444 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"d6f05a129c84bf1d6ac99d18bb1c888fe54bbcb37441712665aa859f1cb5be6a"} Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.617354 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.636735 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663438 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpz9b\" (UniqueName: \"kubernetes.io/projected/c8f364e0-b915-4bdc-969a-469119a78a2d-kube-api-access-lpz9b\") pod \"c8f364e0-b915-4bdc-969a-469119a78a2d\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663535 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-combined-ca-bundle\") pod \"148d3ae9-f212-412a-b4a3-95c60681e8e3\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663615 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-db-sync-config-data\") pod \"148d3ae9-f212-412a-b4a3-95c60681e8e3\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663663 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5xb7\" (UniqueName: \"kubernetes.io/projected/148d3ae9-f212-412a-b4a3-95c60681e8e3-kube-api-access-b5xb7\") pod \"148d3ae9-f212-412a-b4a3-95c60681e8e3\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663696 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-config-data\") pod \"c8f364e0-b915-4bdc-969a-469119a78a2d\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663725 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-config-data\") pod \"148d3ae9-f212-412a-b4a3-95c60681e8e3\" (UID: \"148d3ae9-f212-412a-b4a3-95c60681e8e3\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.663785 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-combined-ca-bundle\") pod \"c8f364e0-b915-4bdc-969a-469119a78a2d\" (UID: \"c8f364e0-b915-4bdc-969a-469119a78a2d\") " Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.672959 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8f364e0-b915-4bdc-969a-469119a78a2d-kube-api-access-lpz9b" (OuterVolumeSpecName: "kube-api-access-lpz9b") pod "c8f364e0-b915-4bdc-969a-469119a78a2d" (UID: "c8f364e0-b915-4bdc-969a-469119a78a2d"). InnerVolumeSpecName "kube-api-access-lpz9b". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.674541 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "148d3ae9-f212-412a-b4a3-95c60681e8e3" (UID: "148d3ae9-f212-412a-b4a3-95c60681e8e3"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.688250 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/148d3ae9-f212-412a-b4a3-95c60681e8e3-kube-api-access-b5xb7" (OuterVolumeSpecName: "kube-api-access-b5xb7") pod "148d3ae9-f212-412a-b4a3-95c60681e8e3" (UID: "148d3ae9-f212-412a-b4a3-95c60681e8e3"). InnerVolumeSpecName "kube-api-access-b5xb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.729304 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8f364e0-b915-4bdc-969a-469119a78a2d" (UID: "c8f364e0-b915-4bdc-969a-469119a78a2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.731149 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "148d3ae9-f212-412a-b4a3-95c60681e8e3" (UID: "148d3ae9-f212-412a-b4a3-95c60681e8e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.753325 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-config-data" (OuterVolumeSpecName: "config-data") pod "148d3ae9-f212-412a-b4a3-95c60681e8e3" (UID: "148d3ae9-f212-412a-b4a3-95c60681e8e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.755232 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-config-data" (OuterVolumeSpecName: "config-data") pod "c8f364e0-b915-4bdc-969a-469119a78a2d" (UID: "c8f364e0-b915-4bdc-969a-469119a78a2d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.765523 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.765773 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.765866 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-lpz9b\" (UniqueName: \"kubernetes.io/projected/c8f364e0-b915-4bdc-969a-469119a78a2d-kube-api-access-lpz9b\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.765965 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.766051 3552 reconciler_common.go:300] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/148d3ae9-f212-412a-b4a3-95c60681e8e3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.766146 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-b5xb7\" (UniqueName: \"kubernetes.io/projected/148d3ae9-f212-412a-b4a3-95c60681e8e3-kube-api-access-b5xb7\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:34 crc kubenswrapper[3552]: I0320 15:47:34.766232 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8f364e0-b915-4bdc-969a-469119a78a2d-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.210792 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"8ab44821de9dbfcc50302a0f150da6e221d82b5286a7e5fac5c5cdef6148eb6f"} Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.211235 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"abf828f6c7a8f90b95a0aac01efc48af101cda5c8f42642055ca2e20c224d5bf"} Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.211249 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"bb7feacb93d252b6e0c19c1d67eaae6bb87ec790263a2aa09dd76332abbd70ac"} Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.212901 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-77zr7" event={"ID":"148d3ae9-f212-412a-b4a3-95c60681e8e3","Type":"ContainerDied","Data":"dd2c09578afe8ba913bd03bf8efe6ec82714d6f37e2c04ddc41d04b40327a0d2"} Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.212931 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd2c09578afe8ba913bd03bf8efe6ec82714d6f37e2c04ddc41d04b40327a0d2" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.212987 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-77zr7" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.222541 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-rnz7c" event={"ID":"c8f364e0-b915-4bdc-969a-469119a78a2d","Type":"ContainerDied","Data":"3785f36a9476dc333a8cefc2b792721ce5db86495cbb2789edc8626ee34a5c87"} Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.222595 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3785f36a9476dc333a8cefc2b792721ce5db86495cbb2789edc8626ee34a5c87" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.222644 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-rnz7c" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.412813 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-q7n8s"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.413228 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" podNamespace="openstack" podName="keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: E0320 15:47:35.413689 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c8f364e0-b915-4bdc-969a-469119a78a2d" containerName="keystone-db-sync" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.413709 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8f364e0-b915-4bdc-969a-469119a78a2d" containerName="keystone-db-sync" Mar 20 15:47:35 crc kubenswrapper[3552]: E0320 15:47:35.413767 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="148d3ae9-f212-412a-b4a3-95c60681e8e3" containerName="watcher-db-sync" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.413783 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="148d3ae9-f212-412a-b4a3-95c60681e8e3" containerName="watcher-db-sync" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.414342 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8f364e0-b915-4bdc-969a-469119a78a2d" containerName="keystone-db-sync" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.414459 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="148d3ae9-f212-412a-b4a3-95c60681e8e3" containerName="watcher-db-sync" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.415912 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.425881 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.426067 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.426596 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.427177 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.427961 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tt8n9" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.474327 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-combined-ca-bundle\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.479188 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-credential-keys\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.479327 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-fernet-keys\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.479514 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-scripts\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.479617 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g46wh\" (UniqueName: \"kubernetes.io/projected/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-kube-api-access-g46wh\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.479766 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-config-data\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.476485 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-q7n8s"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.512735 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-7968dc795-kgnvn"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.512992 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerName="dnsmasq-dns" containerID="cri-o://44afb2b63ac7b371659a7ca0f789d6aa3a1fb01c7c38a8272c5bb939cb3b8c5b" gracePeriod=10 Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.526610 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.583376 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-scripts\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.583460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g46wh\" (UniqueName: \"kubernetes.io/projected/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-kube-api-access-g46wh\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.583531 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-config-data\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.583590 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-combined-ca-bundle\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.583630 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-credential-keys\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.583659 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-fernet-keys\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.592502 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-scripts\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.596355 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-fernet-keys\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " 
pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.602423 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-config-data\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.614989 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-credential-keys\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.617021 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-combined-ca-bundle\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.662037 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-g46wh\" (UniqueName: \"kubernetes.io/projected/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-kube-api-access-g46wh\") pod \"keystone-bootstrap-q7n8s\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.682574 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cb75b9d7f-kzdxv"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.682742 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e4a55ec5-94e2-436d-a800-e707eba56538" podNamespace="openstack" podName="dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.685033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.701285 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cb75b9d7f-kzdxv"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.743675 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-dncfs"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.743904 3552 topology_manager.go:215] "Topology Admit Handler" podUID="625d76ad-7531-4be1-ab6d-769f15e6a7e5" podNamespace="openstack" podName="cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.744859 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.776865 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.778024 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-w2wb9" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.778328 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.778706 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.785928 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-config-data\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.785973 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-nb\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.785996 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw299\" (UniqueName: \"kubernetes.io/projected/625d76ad-7531-4be1-ab6d-769f15e6a7e5-kube-api-access-bw299\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786033 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-sb\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786057 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-db-sync-config-data\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786111 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-dns-svc\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786140 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js68r\" (UniqueName: \"kubernetes.io/projected/e4a55ec5-94e2-436d-a800-e707eba56538-kube-api-access-js68r\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786170 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-combined-ca-bundle\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786189 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-config\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786230 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/625d76ad-7531-4be1-ab6d-769f15e6a7e5-etc-machine-id\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.786248 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-scripts\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.811755 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dncfs"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.823816 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-mrt5x"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.824006 3552 topology_manager.go:215] "Topology Admit Handler" podUID="55759243-c923-4bbc-8693-f0c35e30f6a1" podNamespace="openstack" podName="neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.825216 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.837513 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.837766 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mpcl4" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.852734 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.868323 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.868488 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" podNamespace="openstack" podName="watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.869376 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.879678 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.879810 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-t9t8c" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890270 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-nb\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890331 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bw299\" (UniqueName: \"kubernetes.io/projected/625d76ad-7531-4be1-ab6d-769f15e6a7e5-kube-api-access-bw299\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890356 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890398 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-sb\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890435 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-db-sync-config-data\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890491 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-dns-svc\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890518 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-js68r\" (UniqueName: \"kubernetes.io/projected/e4a55ec5-94e2-436d-a800-e707eba56538-kube-api-access-js68r\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890538 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-config\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 
15:47:35.890559 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z949q\" (UniqueName: \"kubernetes.io/projected/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-kube-api-access-z949q\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890584 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-combined-ca-bundle\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890604 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-config\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890630 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-combined-ca-bundle\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890652 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-logs\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890675 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ndk7\" (UniqueName: \"kubernetes.io/projected/55759243-c923-4bbc-8693-f0c35e30f6a1-kube-api-access-8ndk7\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890695 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-config-data\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890716 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/625d76ad-7531-4be1-ab6d-769f15e6a7e5-etc-machine-id\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890734 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-scripts\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.890755 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-config-data\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.893381 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-nb\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.894165 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-sb\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.901756 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-dns-svc\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.905108 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/625d76ad-7531-4be1-ab6d-769f15e6a7e5-etc-machine-id\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.906291 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-config\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.906338 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/horizon-c585b5b7c-lrxqx"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.906486 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" podNamespace="openstack" podName="horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.907672 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.908684 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-config-data\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.908951 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-scripts\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.923102 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-combined-ca-bundle\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.932228 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-db-sync-config-data\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.932597 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.933269 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-gv8p6" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.965621 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"horizon" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.965840 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.977491 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.977757 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" podNamespace="openstack" podName="watcher-decision-engine-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.979889 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.993868 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.993945 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsnwr\" (UniqueName: \"kubernetes.io/projected/7c90e274-fdb6-4e4a-a3af-1b48027f429e-kube-api-access-zsnwr\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.993998 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-config\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994024 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-z949q\" (UniqueName: \"kubernetes.io/projected/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-kube-api-access-z949q\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994056 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7c90e274-fdb6-4e4a-a3af-1b48027f429e-horizon-secret-key\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994080 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c90e274-fdb6-4e4a-a3af-1b48027f429e-logs\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994108 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-scripts\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994141 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-config-data\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994178 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-combined-ca-bundle\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " 
pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994213 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-logs\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994254 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8ndk7\" (UniqueName: \"kubernetes.io/projected/55759243-c923-4bbc-8693-f0c35e30f6a1-kube-api-access-8ndk7\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994291 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-config-data\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994347 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994446 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-config-data\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994483 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrn92\" (UniqueName: \"kubernetes.io/projected/7b208453-e2bc-4dc0-b407-c652e393c13d-kube-api-access-jrn92\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994514 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:35 crc kubenswrapper[3552]: I0320 15:47:35.994536 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b208453-e2bc-4dc0-b407-c652e393c13d-logs\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:35.998806 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.003944 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-logs\") pod \"watcher-applier-0\" (UID: 
\"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.009477 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.010356 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-config\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.011218 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-js68r\" (UniqueName: \"kubernetes.io/projected/e4a55ec5-94e2-436d-a800-e707eba56538-kube-api-access-js68r\") pod \"dnsmasq-dns-5cb75b9d7f-kzdxv\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.018125 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-combined-ca-bundle\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.032668 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-config-data\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.037704 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.083991 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-z949q\" (UniqueName: \"kubernetes.io/projected/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-kube-api-access-z949q\") pod \"watcher-applier-0\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " pod="openstack/watcher-applier-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.084081 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.093740 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw299\" (UniqueName: \"kubernetes.io/projected/625d76ad-7531-4be1-ab6d-769f15e6a7e5-kube-api-access-bw299\") pod \"cinder-db-sync-dncfs\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.098942 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7c90e274-fdb6-4e4a-a3af-1b48027f429e-horizon-secret-key\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.099231 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c90e274-fdb6-4e4a-a3af-1b48027f429e-logs\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.099375 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-scripts\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.099563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-config-data\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.099803 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-config-data\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.099951 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-jrn92\" (UniqueName: \"kubernetes.io/projected/7b208453-e2bc-4dc0-b407-c652e393c13d-kube-api-access-jrn92\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.100093 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: 
\"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.100277 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b208453-e2bc-4dc0-b407-c652e393c13d-logs\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.100502 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.100668 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zsnwr\" (UniqueName: \"kubernetes.io/projected/7c90e274-fdb6-4e4a-a3af-1b48027f429e-kube-api-access-zsnwr\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.102345 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-scripts\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.102720 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c90e274-fdb6-4e4a-a3af-1b48027f429e-logs\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.104674 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b208453-e2bc-4dc0-b407-c652e393c13d-logs\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.113959 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-config-data\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.124137 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-dncfs" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.128005 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ndk7\" (UniqueName: \"kubernetes.io/projected/55759243-c923-4bbc-8693-f0c35e30f6a1-kube-api-access-8ndk7\") pod \"neutron-db-sync-mrt5x\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.139182 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7c90e274-fdb6-4e4a-a3af-1b48027f429e-horizon-secret-key\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.157141 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-config-data\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.158016 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.171896 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsnwr\" (UniqueName: \"kubernetes.io/projected/7c90e274-fdb6-4e4a-a3af-1b48027f429e-kube-api-access-zsnwr\") pod \"horizon-c585b5b7c-lrxqx\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.172339 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.179174 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.205894 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.206358 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-mrt5x"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.212317 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.214252 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c585b5b7c-lrxqx"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.234455 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-z758k"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.234666 3552 topology_manager.go:215] "Topology Admit Handler" podUID="739e2036-5958-4ee3-9fe3-4734696fdc6a" podNamespace="openstack" podName="barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.236106 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.244418 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrn92\" (UniqueName: \"kubernetes.io/projected/7b208453-e2bc-4dc0-b407-c652e393c13d-kube-api-access-jrn92\") pod \"watcher-decision-engine-0\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.244515 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.244602 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-zxnl5" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.252681 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-z758k"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.263766 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.268128 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.268302 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" podNamespace="openstack" podName="watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.269725 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.278738 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.290644 3552 generic.go:334] "Generic (PLEG): container finished" podID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerID="44afb2b63ac7b371659a7ca0f789d6aa3a1fb01c7c38a8272c5bb939cb3b8c5b" exitCode=0 Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.290701 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.290725 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" event={"ID":"a0e4188f-98f6-4c10-ad13-7f2061f2af9f","Type":"ContainerDied","Data":"44afb2b63ac7b371659a7ca0f789d6aa3a1fb01c7c38a8272c5bb939cb3b8c5b"} Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.313065 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.320713 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-db-sync-config-data\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.320768 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klxr7\" (UniqueName: \"kubernetes.io/projected/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-kube-api-access-klxr7\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.320794 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-combined-ca-bundle\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.320861 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.320964 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-logs\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.321023 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-config-data\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.321105 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz2tf\" (UniqueName: \"kubernetes.io/projected/739e2036-5958-4ee3-9fe3-4734696fdc6a-kube-api-access-lz2tf\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.321161 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.394276 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vbvt2"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.394435 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8ae12b08-a678-4f33-8b7c-d23c6aca08fe" podNamespace="openstack" podName="placement-db-sync-vbvt2" Mar 
20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.395494 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435262 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lz2tf\" (UniqueName: \"kubernetes.io/projected/739e2036-5958-4ee3-9fe3-4734696fdc6a-kube-api-access-lz2tf\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435321 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435370 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-db-sync-config-data\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435391 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-klxr7\" (UniqueName: \"kubernetes.io/projected/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-kube-api-access-klxr7\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435424 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-combined-ca-bundle\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435504 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-logs\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.435532 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-config-data\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.444537 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-config-data\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.456674 3552 kubelet.go:2429] 
"SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.456848 3552 topology_manager.go:215] "Topology Admit Handler" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.459550 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.475956 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.476147 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.476245 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-98wcw" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.477512 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.479745 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-logs\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.480895 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz2tf\" (UniqueName: \"kubernetes.io/projected/739e2036-5958-4ee3-9fe3-4734696fdc6a-kube-api-access-lz2tf\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.485395 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-combined-ca-bundle\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.495074 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-db-sync-config-data\") pod \"barbican-db-sync-z758k\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.499518 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vbvt2"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.500324 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.512989 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.521352 3552 kubelet.go:2436] "SyncLoop 
UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.556338 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.556487 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cb75b9d7f-kzdxv"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.557683 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.557718 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.557767 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-log-httpd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.557942 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-run-httpd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.557997 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgzgd\" (UniqueName: \"kubernetes.io/projected/be47322e-8d8e-47c7-b0c6-c0de22806d81-kube-api-access-zgzgd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.558064 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-config-data\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.558151 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-scripts\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.584550 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.584710 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" podNamespace="openstack" podName="glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.608077 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-klxr7\" (UniqueName: 
\"kubernetes.io/projected/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-kube-api-access-klxr7\") pod \"watcher-api-0\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") " pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.618674 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-96b46c767-7xf5t"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.618862 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0b6a467f-e04d-436e-ba50-72789f592266" podNamespace="openstack" podName="dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.623203 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.627737 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.629457 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.634754 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rwgjn" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.636670 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.636830 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.644833 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.649739 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.656489 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-96b46c767-7xf5t"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666188 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-scripts\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666241 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666270 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666302 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-combined-ca-bundle\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666374 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-log-httpd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666444 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-run-httpd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666465 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zgzgd\" (UniqueName: \"kubernetes.io/projected/be47322e-8d8e-47c7-b0c6-c0de22806d81-kube-api-access-zgzgd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666512 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-logs\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666531 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-config-data\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc 
kubenswrapper[3552]: I0320 15:47:36.666569 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hkmh\" (UniqueName: \"kubernetes.io/projected/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-kube-api-access-4hkmh\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666632 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-scripts\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.666752 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-config-data\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.680086 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.680301 3552 topology_manager.go:215] "Topology Admit Handler" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" podNamespace="openstack" podName="glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.681609 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.684001 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-config-data\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.685891 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.687048 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.687357 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.688628 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.696257 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-z758k" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.704411 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fddf585b5-c58zk"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.704573 3552 topology_manager.go:215] "Topology Admit Handler" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" podNamespace="openstack" podName="horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.705888 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.708780 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-run-httpd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.710069 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-log-httpd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.727202 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-scripts\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.752311 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgzgd\" (UniqueName: \"kubernetes.io/projected/be47322e-8d8e-47c7-b0c6-c0de22806d81-kube-api-access-zgzgd\") pod \"ceilometer-0\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769012 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769062 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-logs\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769080 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769124 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " 
pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769145 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-logs\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769190 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-logs\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769231 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4hkmh\" (UniqueName: \"kubernetes.io/projected/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-kube-api-access-4hkmh\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769274 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-scripts\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769298 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-config-data\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769332 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcsnm\" (UniqueName: \"kubernetes.io/projected/0b6a467f-e04d-436e-ba50-72789f592266-kube-api-access-zcsnm\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769361 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769384 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-config-data\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769427 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-dns-svc\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " 
pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769451 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769475 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vv24z\" (UniqueName: \"kubernetes.io/projected/af48c0e5-6bff-4868-b778-c8724a3b2e68-kube-api-access-vv24z\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769500 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769522 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769544 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769568 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-nb\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769585 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-sb\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769607 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769625 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769655 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-scripts\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769680 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjlqm\" (UniqueName: \"kubernetes.io/projected/7b93e070-b03a-4c4d-8190-af41fd5b705e-kube-api-access-kjlqm\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769699 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-config\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.769722 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-combined-ca-bundle\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.772844 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-logs\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.776754 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-scripts\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.781528 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-config-data\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.795288 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-combined-ca-bundle\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.812440 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.817379 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hkmh\" (UniqueName: 
\"kubernetes.io/projected/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-kube-api-access-4hkmh\") pod \"placement-db-sync-vbvt2\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") " pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.832987 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vbvt2" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.846566 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fddf585b5-c58zk"] Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877122 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-scripts\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877161 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-config-data\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877200 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/30916098-d2f2-40d6-b7e8-e3c784f7169b-horizon-secret-key\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877219 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-config-data\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877263 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zcsnm\" (UniqueName: \"kubernetes.io/projected/0b6a467f-e04d-436e-ba50-72789f592266-kube-api-access-zcsnm\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877336 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877369 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-dns-svc\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877426 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-scripts\") pod \"glance-default-internal-api-0\" 
(UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877451 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vv24z\" (UniqueName: \"kubernetes.io/projected/af48c0e5-6bff-4868-b778-c8724a3b2e68-kube-api-access-vv24z\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877479 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-scripts\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877508 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq6fj\" (UniqueName: \"kubernetes.io/projected/30916098-d2f2-40d6-b7e8-e3c784f7169b-kube-api-access-wq6fj\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877532 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877563 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877593 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.877629 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-nb\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.879032 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-dns-svc\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.881992 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") device mount path 
\"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.884177 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.906172 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-sb\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.909752 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-nb\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.910912 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-sb\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.910983 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911017 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911080 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30916098-d2f2-40d6-b7e8-e3c784f7169b-logs\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911146 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kjlqm\" (UniqueName: \"kubernetes.io/projected/7b93e070-b03a-4c4d-8190-af41fd5b705e-kube-api-access-kjlqm\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911177 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-config\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 
15:47:36.911263 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911317 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-logs\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911341 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911435 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.911473 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-logs\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.913260 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.917793 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.920693 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.921854 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.921368 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-scripts\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.938541 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.959203 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vv24z\" (UniqueName: \"kubernetes.io/projected/af48c0e5-6bff-4868-b778-c8724a3b2e68-kube-api-access-vv24z\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:36 crc kubenswrapper[3552]: I0320 15:47:36.999688 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-logs\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.027737 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30916098-d2f2-40d6-b7e8-e3c784f7169b-logs\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.043374 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-config-data\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.044844 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/30916098-d2f2-40d6-b7e8-e3c784f7169b-horizon-secret-key\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.044908 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-config-data\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.045016 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-scripts\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.045139 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wq6fj\" (UniqueName: \"kubernetes.io/projected/30916098-d2f2-40d6-b7e8-e3c784f7169b-kube-api-access-wq6fj\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.060836 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.125780 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-logs\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.169640 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30916098-d2f2-40d6-b7e8-e3c784f7169b-logs\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.183041 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-config\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.193165 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-scripts\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.193311 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.194893 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcsnm\" (UniqueName: \"kubernetes.io/projected/0b6a467f-e04d-436e-ba50-72789f592266-kube-api-access-zcsnm\") pod \"dnsmasq-dns-96b46c767-7xf5t\" (UID: 
\"0b6a467f-e04d-436e-ba50-72789f592266\") " pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.205821 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.221800 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjlqm\" (UniqueName: \"kubernetes.io/projected/7b93e070-b03a-4c4d-8190-af41fd5b705e-kube-api-access-kjlqm\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.227790 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq6fj\" (UniqueName: \"kubernetes.io/projected/30916098-d2f2-40d6-b7e8-e3c784f7169b-kube-api-access-wq6fj\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.228239 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.233887 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-config-data\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.207821 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/30916098-d2f2-40d6-b7e8-e3c784f7169b-horizon-secret-key\") pod \"horizon-5fddf585b5-c58zk\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") " pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.271872 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.293603 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.304227 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.338242 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.354607 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-q7n8s" event={"ID":"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4","Type":"ContainerStarted","Data":"8a556bd8d42562dc702b4ceab30b14067afc9055b963c73f44ce0d5d6d5c5913"} Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.357769 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.391351 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"24f412bdef7c67b4ca6b5753f09d63e41fdeb12ff5a3a3725900194bed5db965"} Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.407203 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-q7n8s"] Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.522063 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cb75b9d7f-kzdxv"] Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.554928 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Mar 20 15:47:37 crc kubenswrapper[3552]: W0320 15:47:37.705125 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4a55ec5_94e2_436d_a800_e707eba56538.slice/crio-208f9058ac0ecff041e5add10c1d919234223f7a9c02a1bc3d72724d6a433087 WatchSource:0}: Error finding container 208f9058ac0ecff041e5add10c1d919234223f7a9c02a1bc3d72724d6a433087: Status 404 returned error can't find the container with id 208f9058ac0ecff041e5add10c1d919234223f7a9c02a1bc3d72724d6a433087 Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.841042 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dncfs"] Mar 20 15:47:37 crc kubenswrapper[3552]: I0320 15:47:37.866693 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c585b5b7c-lrxqx"] Mar 20 15:47:38 crc kubenswrapper[3552]: W0320 15:47:38.008540 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c90e274_fdb6_4e4a_a3af_1b48027f429e.slice/crio-911891d394d12c2444c6ad6bef03c620ed7594199b83cc5adb486de0aa7fd313 WatchSource:0}: Error finding container 911891d394d12c2444c6ad6bef03c620ed7594199b83cc5adb486de0aa7fd313: Status 404 returned error can't find the container with id 911891d394d12c2444c6ad6bef03c620ed7594199b83cc5adb486de0aa7fd313 Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.119577 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.166692 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-nb\") pod \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.166762 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxqgs\" (UniqueName: \"kubernetes.io/projected/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-kube-api-access-vxqgs\") pod \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.166852 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-dns-svc\") pod \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.166900 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-config\") pod \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.166970 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-sb\") pod \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\" (UID: \"a0e4188f-98f6-4c10-ad13-7f2061f2af9f\") " Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.179012 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.181646 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.203666 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-kube-api-access-vxqgs" (OuterVolumeSpecName: "kube-api-access-vxqgs") pod "a0e4188f-98f6-4c10-ad13-7f2061f2af9f" (UID: "a0e4188f-98f6-4c10-ad13-7f2061f2af9f"). InnerVolumeSpecName "kube-api-access-vxqgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.204690 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.270124 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a0e4188f-98f6-4c10-ad13-7f2061f2af9f" (UID: "a0e4188f-98f6-4c10-ad13-7f2061f2af9f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.271590 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-vxqgs\" (UniqueName: \"kubernetes.io/projected/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-kube-api-access-vxqgs\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.271610 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.311346 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c585b5b7c-lrxqx"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.337617 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-config" (OuterVolumeSpecName: "config") pod "a0e4188f-98f6-4c10-ad13-7f2061f2af9f" (UID: "a0e4188f-98f6-4c10-ad13-7f2061f2af9f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.338185 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/horizon-5cf7d98dbf-jv84z"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.338346 3552 topology_manager.go:215] "Topology Admit Handler" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" podNamespace="openstack" podName="horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: E0320 15:47:38.338624 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerName="dnsmasq-dns" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.338642 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerName="dnsmasq-dns" Mar 20 15:47:38 crc kubenswrapper[3552]: E0320 15:47:38.338662 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerName="init" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.338668 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerName="init" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.338889 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" containerName="dnsmasq-dns" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.344141 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.353971 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5cf7d98dbf-jv84z"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.373455 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.404709 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.456306 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.469575 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a0e4188f-98f6-4c10-ad13-7f2061f2af9f" (UID: "a0e4188f-98f6-4c10-ad13-7f2061f2af9f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.474468 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451d55ac-e9ef-427a-aba6-b85a62a08e59-logs\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.474532 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-config-data\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.474571 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-scripts\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.474632 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/451d55ac-e9ef-427a-aba6-b85a62a08e59-horizon-secret-key\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.474673 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g662\" (UniqueName: \"kubernetes.io/projected/451d55ac-e9ef-427a-aba6-b85a62a08e59-kube-api-access-9g662\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.474723 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.482770 3552 
kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.486754 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a0e4188f-98f6-4c10-ad13-7f2061f2af9f" (UID: "a0e4188f-98f6-4c10-ad13-7f2061f2af9f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.487248 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"59cad4cac2c53df1e8e9f1f41b092a09a415a678fd85e4e68bf23fde16b7acf0"} Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.498753 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-mrt5x"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.514055 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.517290 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" event={"ID":"a0e4188f-98f6-4c10-ad13-7f2061f2af9f","Type":"ContainerDied","Data":"1f163741730797c6e7f42e661bdfccd36342ecc8bd1d779d6799d909423a4dce"} Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.517332 3552 scope.go:117] "RemoveContainer" containerID="44afb2b63ac7b371659a7ca0f789d6aa3a1fb01c7c38a8272c5bb939cb3b8c5b" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.518201 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7968dc795-kgnvn" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.519103 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dncfs" event={"ID":"625d76ad-7531-4be1-ab6d-769f15e6a7e5","Type":"ContainerStarted","Data":"bdb0ad4c71a27c27082f840e51fe132d70f96d4eb123c67324bb8b46cff0ca8c"} Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.530394 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c585b5b7c-lrxqx" event={"ID":"7c90e274-fdb6-4e4a-a3af-1b48027f429e","Type":"ContainerStarted","Data":"911891d394d12c2444c6ad6bef03c620ed7594199b83cc5adb486de0aa7fd313"} Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.537539 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" event={"ID":"e4a55ec5-94e2-436d-a800-e707eba56538","Type":"ContainerStarted","Data":"208f9058ac0ecff041e5add10c1d919234223f7a9c02a1bc3d72724d6a433087"} Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.555930 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.571868 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/keystone-bootstrap-q7n8s" podStartSLOduration=3.57180125 podStartE2EDuration="3.57180125s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:38.545413634 +0000 UTC m=+1358.239110484" watchObservedRunningTime="2026-03-20 15:47:38.57180125 +0000 UTC m=+1358.265498080" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.577607 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451d55ac-e9ef-427a-aba6-b85a62a08e59-logs\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.577666 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-config-data\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.577702 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-scripts\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.577736 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/451d55ac-e9ef-427a-aba6-b85a62a08e59-horizon-secret-key\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.577790 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9g662\" (UniqueName: \"kubernetes.io/projected/451d55ac-e9ef-427a-aba6-b85a62a08e59-kube-api-access-9g662\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.577899 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0e4188f-98f6-4c10-ad13-7f2061f2af9f-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.583388 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451d55ac-e9ef-427a-aba6-b85a62a08e59-logs\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.584196 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-scripts\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.591420 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-config-data\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.628315 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/451d55ac-e9ef-427a-aba6-b85a62a08e59-horizon-secret-key\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc 
kubenswrapper[3552]: I0320 15:47:38.645416 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g662\" (UniqueName: \"kubernetes.io/projected/451d55ac-e9ef-427a-aba6-b85a62a08e59-kube-api-access-9g662\") pod \"horizon-5cf7d98dbf-jv84z\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") " pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.687272 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7968dc795-kgnvn"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.694838 3552 scope.go:117] "RemoveContainer" containerID="3e2a5c0bd56e03eb9248cc3f17e3e23284085c9b587749fae69b08348db85632" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.710731 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.713735 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7968dc795-kgnvn"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.767961 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.901945 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.915054 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-z758k"] Mar 20 15:47:38 crc kubenswrapper[3552]: W0320 15:47:38.920225 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f6f7ad8_3778_4a4d_bee7_eeeb14b9b48a.slice/crio-699358d3f203f0454e3c7e767b03220aa53ce32f270bf1a7ffa20e7c3344bbdf WatchSource:0}: Error finding container 699358d3f203f0454e3c7e767b03220aa53ce32f270bf1a7ffa20e7c3344bbdf: Status 404 returned error can't find the container with id 699358d3f203f0454e3c7e767b03220aa53ce32f270bf1a7ffa20e7c3344bbdf Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.949378 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:47:38 crc kubenswrapper[3552]: I0320 15:47:38.959216 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vbvt2"] Mar 20 15:47:38 crc kubenswrapper[3552]: W0320 15:47:38.986998 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe47322e_8d8e_47c7_b0c6_c0de22806d81.slice/crio-593cad958dbecccd3c4bf332b462932ff17406de70c6565c09110aa9482ebf0a WatchSource:0}: Error finding container 593cad958dbecccd3c4bf332b462932ff17406de70c6565c09110aa9482ebf0a: Status 404 returned error can't find the container with id 593cad958dbecccd3c4bf332b462932ff17406de70c6565c09110aa9482ebf0a Mar 20 15:47:39 crc kubenswrapper[3552]: W0320 15:47:39.002957 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ae12b08_a678_4f33_8b7c_d23c6aca08fe.slice/crio-de3e903bb96db6d557a3fe8546a98dcb9c755376204dd7da8791955b5d310bfe WatchSource:0}: Error finding container de3e903bb96db6d557a3fe8546a98dcb9c755376204dd7da8791955b5d310bfe: Status 404 returned error can't find the container with id de3e903bb96db6d557a3fe8546a98dcb9c755376204dd7da8791955b5d310bfe Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.219575 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-96b46c767-7xf5t"] Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.272820 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fddf585b5-c58zk"] Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.376841 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.454855 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0e4188f-98f6-4c10-ad13-7f2061f2af9f" path="/var/lib/kubelet/pods/a0e4188f-98f6-4c10-ad13-7f2061f2af9f/volumes" Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.477170 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5cf7d98dbf-jv84z"] Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.590474 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"7b208453-e2bc-4dc0-b407-c652e393c13d","Type":"ContainerStarted","Data":"083fc75dd19dcbeb01cd87010135c6bc9231f85f8ca52d06d8fd26e75cc1958f"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.707677 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"2733a44586a2c85d9f403b3df3099ec174bfac181bdd543d4088a50ffdc1e58b"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.719700 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z758k" event={"ID":"739e2036-5958-4ee3-9fe3-4734696fdc6a","Type":"ContainerStarted","Data":"94f719e569f905261f976d1bdba0c6999965d1f963db67698e8fa12c3fb8c275"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.722597 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96","Type":"ContainerStarted","Data":"0b769ac80af95b0a1fbf172d42490db8a9f784d88d44866e1470f65d5ba59eeb"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.733952 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a","Type":"ContainerStarted","Data":"173d68c9761c3dd859af3ace1e5911468bcfd831a4529b339dde6f2f3115742b"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.733991 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a","Type":"ContainerStarted","Data":"699358d3f203f0454e3c7e767b03220aa53ce32f270bf1a7ffa20e7c3344bbdf"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.748010 3552 generic.go:334] "Generic (PLEG): container finished" podID="e4a55ec5-94e2-436d-a800-e707eba56538" containerID="21b936c5bac14f298074dc4510127d1853e458de588d48a33beb4ccc45dd4524" exitCode=0 Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.748100 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" event={"ID":"e4a55ec5-94e2-436d-a800-e707eba56538","Type":"ContainerDied","Data":"21b936c5bac14f298074dc4510127d1853e458de588d48a33beb4ccc45dd4524"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.767651 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-q7n8s" event={"ID":"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4","Type":"ContainerStarted","Data":"f341dabebe9ed0ad5bb2ee9a8d89a89629dbfcc253e54f96f0383b88efadd11e"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.804667 
3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vbvt2" event={"ID":"8ae12b08-a678-4f33-8b7c-d23c6aca08fe","Type":"ContainerStarted","Data":"de3e903bb96db6d557a3fe8546a98dcb9c755376204dd7da8791955b5d310bfe"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.829212 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mrt5x" event={"ID":"55759243-c923-4bbc-8693-f0c35e30f6a1","Type":"ContainerStarted","Data":"c75f2060be9c78c13e97a92cd5fce4452ab57bb77e0480c2be041bbe17f45e04"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.829251 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mrt5x" event={"ID":"55759243-c923-4bbc-8693-f0c35e30f6a1","Type":"ContainerStarted","Data":"24c13fcc82f178507fc7b22332e947c94de94340b054b67b1f0b31a5ab28d08c"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.843689 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerStarted","Data":"593cad958dbecccd3c4bf332b462932ff17406de70c6565c09110aa9482ebf0a"} Mar 20 15:47:39 crc kubenswrapper[3552]: I0320 15:47:39.864553 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-db-sync-mrt5x" podStartSLOduration=4.864510832 podStartE2EDuration="4.864510832s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:39.859221291 +0000 UTC m=+1359.552918131" watchObservedRunningTime="2026-03-20 15:47:39.864510832 +0000 UTC m=+1359.558207662" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.181274 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:47:40 crc kubenswrapper[3552]: W0320 15:47:40.370887 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b6a467f_e04d_436e_ba50_72789f592266.slice/crio-f1fe3d191449288ed363a91d87f2c129ab4a38b242c1d1dbbdd208fe2e812b01 WatchSource:0}: Error finding container f1fe3d191449288ed363a91d87f2c129ab4a38b242c1d1dbbdd208fe2e812b01: Status 404 returned error can't find the container with id f1fe3d191449288ed363a91d87f2c129ab4a38b242c1d1dbbdd208fe2e812b01 Mar 20 15:47:40 crc kubenswrapper[3552]: W0320 15:47:40.384235 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30916098_d2f2_40d6_b7e8_e3c784f7169b.slice/crio-41b21e3d83869d74023a36290c7ba426597e60daa17e8a73d6169fa9517d904d WatchSource:0}: Error finding container 41b21e3d83869d74023a36290c7ba426597e60daa17e8a73d6169fa9517d904d: Status 404 returned error can't find the container with id 41b21e3d83869d74023a36290c7ba426597e60daa17e8a73d6169fa9517d904d Mar 20 15:47:40 crc kubenswrapper[3552]: W0320 15:47:40.392035 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf48c0e5_6bff_4868_b778_c8724a3b2e68.slice/crio-0675099c089093bfe6b61a9e86f9ca7ac55f399f29916194de33e19135f45832 WatchSource:0}: Error finding container 0675099c089093bfe6b61a9e86f9ca7ac55f399f29916194de33e19135f45832: Status 404 returned error can't find the container with id 0675099c089093bfe6b61a9e86f9ca7ac55f399f29916194de33e19135f45832 Mar 20 15:47:40 crc kubenswrapper[3552]: 
W0320 15:47:40.393866 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b93e070_b03a_4c4d_8190_af41fd5b705e.slice/crio-bb09728334b1db19b3fb35ba0510732d594b193eb08c41dbac39ee335c263c1d WatchSource:0}: Error finding container bb09728334b1db19b3fb35ba0510732d594b193eb08c41dbac39ee335c263c1d: Status 404 returned error can't find the container with id bb09728334b1db19b3fb35ba0510732d594b193eb08c41dbac39ee335c263c1d Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.423686 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.550899 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-config\") pod \"e4a55ec5-94e2-436d-a800-e707eba56538\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.550954 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-nb\") pod \"e4a55ec5-94e2-436d-a800-e707eba56538\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.551027 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-dns-svc\") pod \"e4a55ec5-94e2-436d-a800-e707eba56538\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.551067 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js68r\" (UniqueName: \"kubernetes.io/projected/e4a55ec5-94e2-436d-a800-e707eba56538-kube-api-access-js68r\") pod \"e4a55ec5-94e2-436d-a800-e707eba56538\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.551147 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-sb\") pod \"e4a55ec5-94e2-436d-a800-e707eba56538\" (UID: \"e4a55ec5-94e2-436d-a800-e707eba56538\") " Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.555137 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4a55ec5-94e2-436d-a800-e707eba56538-kube-api-access-js68r" (OuterVolumeSpecName: "kube-api-access-js68r") pod "e4a55ec5-94e2-436d-a800-e707eba56538" (UID: "e4a55ec5-94e2-436d-a800-e707eba56538"). InnerVolumeSpecName "kube-api-access-js68r". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.572967 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e4a55ec5-94e2-436d-a800-e707eba56538" (UID: "e4a55ec5-94e2-436d-a800-e707eba56538"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.580822 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e4a55ec5-94e2-436d-a800-e707eba56538" (UID: "e4a55ec5-94e2-436d-a800-e707eba56538"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.582884 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e4a55ec5-94e2-436d-a800-e707eba56538" (UID: "e4a55ec5-94e2-436d-a800-e707eba56538"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.603566 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-config" (OuterVolumeSpecName: "config") pod "e4a55ec5-94e2-436d-a800-e707eba56538" (UID: "e4a55ec5-94e2-436d-a800-e707eba56538"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.653316 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.653355 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-js68r\" (UniqueName: \"kubernetes.io/projected/e4a55ec5-94e2-436d-a800-e707eba56538-kube-api-access-js68r\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.653368 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.653379 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.653389 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a55ec5-94e2-436d-a800-e707eba56538-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.853789 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" event={"ID":"0b6a467f-e04d-436e-ba50-72789f592266","Type":"ContainerStarted","Data":"f1fe3d191449288ed363a91d87f2c129ab4a38b242c1d1dbbdd208fe2e812b01"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.856033 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fddf585b5-c58zk" event={"ID":"30916098-d2f2-40d6-b7e8-e3c784f7169b","Type":"ContainerStarted","Data":"41b21e3d83869d74023a36290c7ba426597e60daa17e8a73d6169fa9517d904d"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.859841 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5cf7d98dbf-jv84z" 
event={"ID":"451d55ac-e9ef-427a-aba6-b85a62a08e59","Type":"ContainerStarted","Data":"a174f6d54970fce0c9d2e5555288841d2b162fe95feb6484c7a348ae2723a42f"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.866104 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" event={"ID":"e4a55ec5-94e2-436d-a800-e707eba56538","Type":"ContainerDied","Data":"208f9058ac0ecff041e5add10c1d919234223f7a9c02a1bc3d72724d6a433087"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.866162 3552 scope.go:117] "RemoveContainer" containerID="21b936c5bac14f298074dc4510127d1853e458de588d48a33beb4ccc45dd4524" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.866334 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cb75b9d7f-kzdxv" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.877027 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"af48c0e5-6bff-4868-b778-c8724a3b2e68","Type":"ContainerStarted","Data":"0675099c089093bfe6b61a9e86f9ca7ac55f399f29916194de33e19135f45832"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.890841 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dd24d70f-864e-4803-8e8c-9d9e5aadfa84","Type":"ContainerStarted","Data":"9d492a6f388e9bcbfd17d953530cd126fc5333a329c3dc8c7c8feec3144ad387"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.900954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b93e070-b03a-4c4d-8190-af41fd5b705e","Type":"ContainerStarted","Data":"bb09728334b1db19b3fb35ba0510732d594b193eb08c41dbac39ee335c263c1d"} Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.979865 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=49.857488566 podStartE2EDuration="1m15.979828586s" podCreationTimestamp="2026-03-20 15:46:25 +0000 UTC" firstStartedPulling="2026-03-20 15:47:07.749198708 +0000 UTC m=+1327.442895538" lastFinishedPulling="2026-03-20 15:47:33.871538728 +0000 UTC m=+1353.565235558" observedRunningTime="2026-03-20 15:47:40.946439683 +0000 UTC m=+1360.640136513" watchObservedRunningTime="2026-03-20 15:47:40.979828586 +0000 UTC m=+1360.673525406" Mar 20 15:47:40 crc kubenswrapper[3552]: I0320 15:47:40.998475 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cb75b9d7f-kzdxv"] Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.004190 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cb75b9d7f-kzdxv"] Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.285967 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-96b46c767-7xf5t"] Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.312720 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-599c89c5c9-zrtzk"] Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.312919 3552 topology_manager.go:215] "Topology Admit Handler" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" podNamespace="openstack" podName="dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: E0320 15:47:41.313217 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e4a55ec5-94e2-436d-a800-e707eba56538" containerName="init" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.313239 3552 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="e4a55ec5-94e2-436d-a800-e707eba56538" containerName="init" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.313511 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4a55ec5-94e2-436d-a800-e707eba56538" containerName="init" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.314676 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.319591 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.328424 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-599c89c5c9-zrtzk"] Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.376631 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz8k8\" (UniqueName: \"kubernetes.io/projected/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-kube-api-access-fz8k8\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.376722 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-svc\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.376853 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-config\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.376912 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-swift-storage-0\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.376990 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-sb\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.377029 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-nb\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.442780 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4a55ec5-94e2-436d-a800-e707eba56538" path="/var/lib/kubelet/pods/e4a55ec5-94e2-436d-a800-e707eba56538/volumes" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.478252 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-svc\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.478344 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-config\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.478385 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-swift-storage-0\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.478453 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-sb\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.478480 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-nb\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.478545 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fz8k8\" (UniqueName: \"kubernetes.io/projected/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-kube-api-access-fz8k8\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.479344 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-svc\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.479368 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-config\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.479960 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-sb\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.479982 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-swift-storage-0\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.480191 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-nb\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.514467 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz8k8\" (UniqueName: \"kubernetes.io/projected/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-kube-api-access-fz8k8\") pod \"dnsmasq-dns-599c89c5c9-zrtzk\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:41 crc kubenswrapper[3552]: I0320 15:47:41.636286 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:43 crc kubenswrapper[3552]: E0320 15:47:43.884889 3552 remote_runtime.go:193] "RunPodSandbox from runtime service failed" err=< Mar 20 15:47:43 crc kubenswrapper[3552]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dnsmasq-dns-599c89c5c9-zrtzk_openstack_533f846d-89ff-4b46-84b6-3cbc4f66a5f2_0(8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910): error adding pod openstack_dnsmasq-dns-599c89c5c9-zrtzk to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910" Netns:"/var/run/netns/c1696dce-ae36-467d-98cd-06a3548daa9f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=dnsmasq-dns-599c89c5c9-zrtzk;K8S_POD_INFRA_CONTAINER_ID=8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910;K8S_POD_UID=533f846d-89ff-4b46-84b6-3cbc4f66a5f2" Path:"" ERRORED: error configuring pod [openstack/dnsmasq-dns-599c89c5c9-zrtzk] networking: [openstack/dnsmasq-dns-599c89c5c9-zrtzk/533f846d-89ff-4b46-84b6-3cbc4f66a5f2:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] [openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] failed to configure pod interface: OVS sandbox port 8d8c4f5fe6f1fd8 is no longer active (probably due to a subsequent CNI ADD) Mar 20 15:47:43 crc kubenswrapper[3552]: ' Mar 20 15:47:43 crc kubenswrapper[3552]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Mar 20 15:47:43 crc kubenswrapper[3552]: > Mar 20 15:47:43 crc kubenswrapper[3552]: E0320 15:47:43.885463 3552 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Mar 20 15:47:43 crc kubenswrapper[3552]: rpc error: code = Unknown desc = 
failed to create pod network sandbox k8s_dnsmasq-dns-599c89c5c9-zrtzk_openstack_533f846d-89ff-4b46-84b6-3cbc4f66a5f2_0(8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910): error adding pod openstack_dnsmasq-dns-599c89c5c9-zrtzk to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910" Netns:"/var/run/netns/c1696dce-ae36-467d-98cd-06a3548daa9f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=dnsmasq-dns-599c89c5c9-zrtzk;K8S_POD_INFRA_CONTAINER_ID=8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910;K8S_POD_UID=533f846d-89ff-4b46-84b6-3cbc4f66a5f2" Path:"" ERRORED: error configuring pod [openstack/dnsmasq-dns-599c89c5c9-zrtzk] networking: [openstack/dnsmasq-dns-599c89c5c9-zrtzk/533f846d-89ff-4b46-84b6-3cbc4f66a5f2:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] [openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] failed to configure pod interface: OVS sandbox port 8d8c4f5fe6f1fd8 is no longer active (probably due to a subsequent CNI ADD) Mar 20 15:47:43 crc kubenswrapper[3552]: ' Mar 20 15:47:43 crc kubenswrapper[3552]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Mar 20 15:47:43 crc kubenswrapper[3552]: > pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:43 crc kubenswrapper[3552]: E0320 15:47:43.885482 3552 kuberuntime_manager.go:1172] "CreatePodSandbox for pod failed" err=< Mar 20 15:47:43 crc kubenswrapper[3552]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dnsmasq-dns-599c89c5c9-zrtzk_openstack_533f846d-89ff-4b46-84b6-3cbc4f66a5f2_0(8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910): error adding pod openstack_dnsmasq-dns-599c89c5c9-zrtzk to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910" Netns:"/var/run/netns/c1696dce-ae36-467d-98cd-06a3548daa9f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=dnsmasq-dns-599c89c5c9-zrtzk;K8S_POD_INFRA_CONTAINER_ID=8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910;K8S_POD_UID=533f846d-89ff-4b46-84b6-3cbc4f66a5f2" Path:"" ERRORED: error configuring pod [openstack/dnsmasq-dns-599c89c5c9-zrtzk] networking: [openstack/dnsmasq-dns-599c89c5c9-zrtzk/533f846d-89ff-4b46-84b6-3cbc4f66a5f2:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] [openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] failed to configure pod 
interface: OVS sandbox port 8d8c4f5fe6f1fd8 is no longer active (probably due to a subsequent CNI ADD) Mar 20 15:47:43 crc kubenswrapper[3552]: ' Mar 20 15:47:43 crc kubenswrapper[3552]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Mar 20 15:47:43 crc kubenswrapper[3552]: > pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:43 crc kubenswrapper[3552]: E0320 15:47:43.885569 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"dnsmasq-dns-599c89c5c9-zrtzk_openstack(533f846d-89ff-4b46-84b6-3cbc4f66a5f2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"dnsmasq-dns-599c89c5c9-zrtzk_openstack(533f846d-89ff-4b46-84b6-3cbc4f66a5f2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dnsmasq-dns-599c89c5c9-zrtzk_openstack_533f846d-89ff-4b46-84b6-3cbc4f66a5f2_0(8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910): error adding pod openstack_dnsmasq-dns-599c89c5c9-zrtzk to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910\\\" Netns:\\\"/var/run/netns/c1696dce-ae36-467d-98cd-06a3548daa9f\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=dnsmasq-dns-599c89c5c9-zrtzk;K8S_POD_INFRA_CONTAINER_ID=8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910;K8S_POD_UID=533f846d-89ff-4b46-84b6-3cbc4f66a5f2\\\" Path:\\\"\\\" ERRORED: error configuring pod [openstack/dnsmasq-dns-599c89c5c9-zrtzk] networking: [openstack/dnsmasq-dns-599c89c5c9-zrtzk/533f846d-89ff-4b46-84b6-3cbc4f66a5f2:ovn-kubernetes]: error adding container to network \\\"ovn-kubernetes\\\": CNI request failed with status 400: '[openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] [openstack/dnsmasq-dns-599c89c5c9-zrtzk 8d8c4f5fe6f1fd8040dd58ea31588ef81ef7f254e87c2d778208bead46647910 network default NAD default] failed to configure pod interface: OVS sandbox port 8d8c4f5fe6f1fd8 is no longer active (probably due to a subsequent CNI ADD)\\n'\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.475589 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96","Type":"ContainerStarted","Data":"e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222"} Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.483888 3552 kubelet.go:2461] "SyncLoop (PLEG): 
event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"7b208453-e2bc-4dc0-b407-c652e393c13d","Type":"ContainerStarted","Data":"a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43"} Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.505294 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a","Type":"ContainerStarted","Data":"10d6ccd1a75dfc22b2fccce1394d49d48715b6f97d05b3077130d91a9ea66f69"} Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.505471 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api-log" containerID="cri-o://173d68c9761c3dd859af3ace1e5911468bcfd831a4529b339dde6f2f3115742b" gracePeriod=30 Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.505958 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" containerID="cri-o://10d6ccd1a75dfc22b2fccce1394d49d48715b6f97d05b3077130d91a9ea66f69" gracePeriod=30 Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.506008 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.529668 3552 generic.go:334] "Generic (PLEG): container finished" podID="0b6a467f-e04d-436e-ba50-72789f592266" containerID="44c7fb287feb1b260213f8530bdaf224caf4abc5709a2f10f497d641243253c8" exitCode=0 Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.529742 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.530322 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" event={"ID":"0b6a467f-e04d-436e-ba50-72789f592266","Type":"ContainerDied","Data":"44c7fb287feb1b260213f8530bdaf224caf4abc5709a2f10f497d641243253c8"} Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.530487 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.539315 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": EOF" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.567574 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=9.567517279 podStartE2EDuration="9.567517279s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:44.551384788 +0000 UTC m=+1364.245081628" watchObservedRunningTime="2026-03-20 15:47:44.567517279 +0000 UTC m=+1364.261214119" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.587096 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=6.088539383 podStartE2EDuration="9.58704377s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="2026-03-20 15:47:38.452775004 +0000 UTC m=+1358.146471834" lastFinishedPulling="2026-03-20 15:47:41.951279391 +0000 UTC m=+1361.644976221" observedRunningTime="2026-03-20 15:47:44.529713449 +0000 UTC m=+1364.223410289" watchObservedRunningTime="2026-03-20 15:47:44.58704377 +0000 UTC m=+1364.280740600" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.594523 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=6.13964806 podStartE2EDuration="9.594430037s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="2026-03-20 15:47:38.496495594 +0000 UTC m=+1358.190192424" lastFinishedPulling="2026-03-20 15:47:41.951277571 +0000 UTC m=+1361.644974401" observedRunningTime="2026-03-20 15:47:44.591742726 +0000 UTC m=+1364.285439566" watchObservedRunningTime="2026-03-20 15:47:44.594430037 +0000 UTC m=+1364.288126867" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.910285 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fddf585b5-c58zk"] Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.945589 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/horizon-6f9cbcd486-gj8tz"] Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.945762 3552 topology_manager.go:215] "Topology Admit Handler" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" podNamespace="openstack" podName="horizon-6f9cbcd486-gj8tz" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.949536 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.958475 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Mar 20 15:47:44 crc kubenswrapper[3552]: I0320 15:47:44.984033 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6f9cbcd486-gj8tz"] Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.053111 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5cf7d98dbf-jv84z"] Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.085042 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/horizon-9c7459748-mvczs"] Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.085274 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e30af56a-3534-429c-bbe2-3014515d530f" podNamespace="openstack" podName="horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.086608 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.117921 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-9c7459748-mvczs"] Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137627 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-tls-certs\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137708 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-scripts\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137737 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq6rj\" (UniqueName: \"kubernetes.io/projected/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-kube-api-access-nq6rj\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137765 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-config-data\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137785 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-logs\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137828 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-combined-ca-bundle\") pod \"horizon-6f9cbcd486-gj8tz\" 
(UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.137870 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-secret-key\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.173816 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-599c89c5c9-zrtzk"] Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.198013 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240342 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-scripts\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240423 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nq6rj\" (UniqueName: \"kubernetes.io/projected/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-kube-api-access-nq6rj\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240460 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-config-data\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240479 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-logs\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240520 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql9wv\" (UniqueName: \"kubernetes.io/projected/e30af56a-3534-429c-bbe2-3014515d530f-kube-api-access-ql9wv\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240539 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-combined-ca-bundle\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240569 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-combined-ca-bundle\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 
15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240589 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30af56a-3534-429c-bbe2-3014515d530f-logs\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240617 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-horizon-secret-key\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240641 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30af56a-3534-429c-bbe2-3014515d530f-scripts\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240671 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-secret-key\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240698 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-horizon-tls-certs\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240741 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-tls-certs\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.240761 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30af56a-3534-429c-bbe2-3014515d530f-config-data\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.241538 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-scripts\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.242780 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-config-data\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.242987 3552 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-logs\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.254479 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-combined-ca-bundle\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.283751 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-tls-certs\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.284289 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-secret-key\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.288150 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq6rj\" (UniqueName: \"kubernetes.io/projected/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-kube-api-access-nq6rj\") pod \"horizon-6f9cbcd486-gj8tz\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.303984 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.345078 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-dns-svc\") pod \"0b6a467f-e04d-436e-ba50-72789f592266\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.345826 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-nb\") pod \"0b6a467f-e04d-436e-ba50-72789f592266\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.345989 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-config\") pod \"0b6a467f-e04d-436e-ba50-72789f592266\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.346144 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-sb\") pod \"0b6a467f-e04d-436e-ba50-72789f592266\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.347684 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcsnm\" (UniqueName: \"kubernetes.io/projected/0b6a467f-e04d-436e-ba50-72789f592266-kube-api-access-zcsnm\") pod \"0b6a467f-e04d-436e-ba50-72789f592266\" (UID: \"0b6a467f-e04d-436e-ba50-72789f592266\") " Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348049 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30af56a-3534-429c-bbe2-3014515d530f-config-data\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348206 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ql9wv\" (UniqueName: \"kubernetes.io/projected/e30af56a-3534-429c-bbe2-3014515d530f-kube-api-access-ql9wv\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348232 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-combined-ca-bundle\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348295 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30af56a-3534-429c-bbe2-3014515d530f-logs\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348337 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-horizon-secret-key\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348365 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30af56a-3534-429c-bbe2-3014515d530f-scripts\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.348442 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-horizon-tls-certs\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.355707 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e30af56a-3534-429c-bbe2-3014515d530f-logs\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.355962 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e30af56a-3534-429c-bbe2-3014515d530f-scripts\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.358824 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e30af56a-3534-429c-bbe2-3014515d530f-config-data\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.370090 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-horizon-tls-certs\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.371835 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b6a467f-e04d-436e-ba50-72789f592266-kube-api-access-zcsnm" (OuterVolumeSpecName: "kube-api-access-zcsnm") pod "0b6a467f-e04d-436e-ba50-72789f592266" (UID: "0b6a467f-e04d-436e-ba50-72789f592266"). InnerVolumeSpecName "kube-api-access-zcsnm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.373960 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-combined-ca-bundle\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.383036 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql9wv\" (UniqueName: \"kubernetes.io/projected/e30af56a-3534-429c-bbe2-3014515d530f-kube-api-access-ql9wv\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.389833 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e30af56a-3534-429c-bbe2-3014515d530f-horizon-secret-key\") pod \"horizon-9c7459748-mvczs\" (UID: \"e30af56a-3534-429c-bbe2-3014515d530f\") " pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.411284 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0b6a467f-e04d-436e-ba50-72789f592266" (UID: "0b6a467f-e04d-436e-ba50-72789f592266"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.423192 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-config" (OuterVolumeSpecName: "config") pod "0b6a467f-e04d-436e-ba50-72789f592266" (UID: "0b6a467f-e04d-436e-ba50-72789f592266"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.432795 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0b6a467f-e04d-436e-ba50-72789f592266" (UID: "0b6a467f-e04d-436e-ba50-72789f592266"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.450878 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.451013 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.451105 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.451348 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zcsnm\" (UniqueName: \"kubernetes.io/projected/0b6a467f-e04d-436e-ba50-72789f592266-kube-api-access-zcsnm\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.459678 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.478506 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0b6a467f-e04d-436e-ba50-72789f592266" (UID: "0b6a467f-e04d-436e-ba50-72789f592266"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.558548 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b6a467f-e04d-436e-ba50-72789f592266-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.630422 3552 generic.go:334] "Generic (PLEG): container finished" podID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerID="173d68c9761c3dd859af3ace1e5911468bcfd831a4529b339dde6f2f3115742b" exitCode=143 Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.630493 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a","Type":"ContainerDied","Data":"173d68c9761c3dd859af3ace1e5911468bcfd831a4529b339dde6f2f3115742b"} Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.680998 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" event={"ID":"533f846d-89ff-4b46-84b6-3cbc4f66a5f2","Type":"ContainerStarted","Data":"c680ab7234152d79f785f61d9906572f8a626ffb7544112a264f3ebc7b423100"} Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.721767 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b93e070-b03a-4c4d-8190-af41fd5b705e","Type":"ContainerStarted","Data":"118baa0aaf9e13fbfb2f55f74cef0c0365f5cef8978e869b93aad36e5b5acb52"} Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.745263 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.748861 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-96b46c767-7xf5t" event={"ID":"0b6a467f-e04d-436e-ba50-72789f592266","Type":"ContainerDied","Data":"f1fe3d191449288ed363a91d87f2c129ab4a38b242c1d1dbbdd208fe2e812b01"} Mar 20 15:47:45 crc kubenswrapper[3552]: I0320 15:47:45.748925 3552 scope.go:117] "RemoveContainer" containerID="44c7fb287feb1b260213f8530bdaf224caf4abc5709a2f10f497d641243253c8" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.106530 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-96b46c767-7xf5t"] Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.148145 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-96b46c767-7xf5t"] Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.184940 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.185111 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.316644 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.332944 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6f9cbcd486-gj8tz"] Mar 20 15:47:46 crc kubenswrapper[3552]: W0320 15:47:46.424956 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc2127a5_f3c4_4ae0_8cf3_918fd64c689a.slice/crio-c3015cdf04e3af83985921e5784c19bce490d7d0e0f4ac1dfdb9630d2aac750e WatchSource:0}: Error finding container c3015cdf04e3af83985921e5784c19bce490d7d0e0f4ac1dfdb9630d2aac750e: Status 404 returned error can't find the container with id c3015cdf04e3af83985921e5784c19bce490d7d0e0f4ac1dfdb9630d2aac750e Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.599326 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.648761 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-9c7459748-mvczs"] Mar 20 15:47:46 crc kubenswrapper[3552]: W0320 15:47:46.649876 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode30af56a_3534_429c_bbe2_3014515d530f.slice/crio-68f32d45ff7cb9a939a420c86e5ef7d7153a20f6ee1c2f8143a23455bf10dfdc WatchSource:0}: Error finding container 68f32d45ff7cb9a939a420c86e5ef7d7153a20f6ee1c2f8143a23455bf10dfdc: Status 404 returned error can't find the container with id 68f32d45ff7cb9a939a420c86e5ef7d7153a20f6ee1c2f8143a23455bf10dfdc Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.650571 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.696155 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.760770 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-9c7459748-mvczs" 
event={"ID":"e30af56a-3534-429c-bbe2-3014515d530f","Type":"ContainerStarted","Data":"68f32d45ff7cb9a939a420c86e5ef7d7153a20f6ee1c2f8143a23455bf10dfdc"} Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.769440 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f9cbcd486-gj8tz" event={"ID":"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a","Type":"ContainerStarted","Data":"c3015cdf04e3af83985921e5784c19bce490d7d0e0f4ac1dfdb9630d2aac750e"} Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.776978 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"af48c0e5-6bff-4868-b778-c8724a3b2e68","Type":"ContainerStarted","Data":"7c23aef4459fddbfe888c06adf6ba1d6cbdf8516d57c4820a5e18c217082fc2c"} Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.791616 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.967156 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Mar 20 15:47:46 crc kubenswrapper[3552]: I0320 15:47:46.969111 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.043850 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.093716 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.449267 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b6a467f-e04d-436e-ba50-72789f592266" path="/var/lib/kubelet/pods/0b6a467f-e04d-436e-ba50-72789f592266/volumes" Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.866241 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b93e070-b03a-4c4d-8190-af41fd5b705e","Type":"ContainerStarted","Data":"334aa02a719bb889bf448c34f2a50274d3eb5ee97876b6a80ecbfb4d2cc98044"} Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.866300 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-log" containerID="cri-o://118baa0aaf9e13fbfb2f55f74cef0c0365f5cef8978e869b93aad36e5b5acb52" gracePeriod=30 Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.866447 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-httpd" containerID="cri-o://334aa02a719bb889bf448c34f2a50274d3eb5ee97876b6a80ecbfb4d2cc98044" gracePeriod=30 Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.871938 3552 generic.go:334] "Generic (PLEG): container finished" podID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerID="db23c3b8ab92119d3b76ee65c27015ae2d67de92ce71fa38549c0d9bd6cdc961" exitCode=0 Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.873481 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" event={"ID":"533f846d-89ff-4b46-84b6-3cbc4f66a5f2","Type":"ContainerDied","Data":"db23c3b8ab92119d3b76ee65c27015ae2d67de92ce71fa38549c0d9bd6cdc961"} Mar 20 15:47:47 crc kubenswrapper[3552]: I0320 15:47:47.891244 3552 
pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=11.891210342 podStartE2EDuration="11.891210342s" podCreationTimestamp="2026-03-20 15:47:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:47.887864513 +0000 UTC m=+1367.581561343" watchObservedRunningTime="2026-03-20 15:47:47.891210342 +0000 UTC m=+1367.584907172" Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.881692 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerID="334aa02a719bb889bf448c34f2a50274d3eb5ee97876b6a80ecbfb4d2cc98044" exitCode=143 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.881960 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerID="118baa0aaf9e13fbfb2f55f74cef0c0365f5cef8978e869b93aad36e5b5acb52" exitCode=143 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.881798 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b93e070-b03a-4c4d-8190-af41fd5b705e","Type":"ContainerDied","Data":"334aa02a719bb889bf448c34f2a50274d3eb5ee97876b6a80ecbfb4d2cc98044"} Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.882018 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b93e070-b03a-4c4d-8190-af41fd5b705e","Type":"ContainerDied","Data":"118baa0aaf9e13fbfb2f55f74cef0c0365f5cef8978e869b93aad36e5b5acb52"} Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.885439 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"af48c0e5-6bff-4868-b778-c8724a3b2e68","Type":"ContainerStarted","Data":"2a57556032ddac3ae269ef2a041141651a3fdbfc908c1ed29f1dc5b88b560fcf"} Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.885533 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-log" containerID="cri-o://7c23aef4459fddbfe888c06adf6ba1d6cbdf8516d57c4820a5e18c217082fc2c" gracePeriod=30 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.885641 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-httpd" containerID="cri-o://2a57556032ddac3ae269ef2a041141651a3fdbfc908c1ed29f1dc5b88b560fcf" gracePeriod=30 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.893880 3552 generic.go:334] "Generic (PLEG): container finished" podID="4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" containerID="f341dabebe9ed0ad5bb2ee9a8d89a89629dbfcc253e54f96f0383b88efadd11e" exitCode=0 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.894030 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" containerName="watcher-decision-engine" containerID="cri-o://a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43" gracePeriod=30 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.894097 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-q7n8s" 
event={"ID":"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4","Type":"ContainerDied","Data":"f341dabebe9ed0ad5bb2ee9a8d89a89629dbfcc253e54f96f0383b88efadd11e"} Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.894188 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-applier-0" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" containerID="cri-o://e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" gracePeriod=30 Mar 20 15:47:48 crc kubenswrapper[3552]: I0320 15:47:48.922850 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=12.922807396 podStartE2EDuration="12.922807396s" podCreationTimestamp="2026-03-20 15:47:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:47:48.910312212 +0000 UTC m=+1368.604009042" watchObservedRunningTime="2026-03-20 15:47:48.922807396 +0000 UTC m=+1368.616504226" Mar 20 15:47:49 crc kubenswrapper[3552]: I0320 15:47:49.910517 3552 generic.go:334] "Generic (PLEG): container finished" podID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerID="2a57556032ddac3ae269ef2a041141651a3fdbfc908c1ed29f1dc5b88b560fcf" exitCode=0 Mar 20 15:47:49 crc kubenswrapper[3552]: I0320 15:47:49.910549 3552 generic.go:334] "Generic (PLEG): container finished" podID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerID="7c23aef4459fddbfe888c06adf6ba1d6cbdf8516d57c4820a5e18c217082fc2c" exitCode=143 Mar 20 15:47:49 crc kubenswrapper[3552]: I0320 15:47:49.910722 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"af48c0e5-6bff-4868-b778-c8724a3b2e68","Type":"ContainerDied","Data":"2a57556032ddac3ae269ef2a041141651a3fdbfc908c1ed29f1dc5b88b560fcf"} Mar 20 15:47:49 crc kubenswrapper[3552]: I0320 15:47:49.910781 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"af48c0e5-6bff-4868-b778-c8724a3b2e68","Type":"ContainerDied","Data":"7c23aef4459fddbfe888c06adf6ba1d6cbdf8516d57c4820a5e18c217082fc2c"} Mar 20 15:47:50 crc kubenswrapper[3552]: I0320 15:47:50.921033 3552 generic.go:334] "Generic (PLEG): container finished" podID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" exitCode=0 Mar 20 15:47:50 crc kubenswrapper[3552]: I0320 15:47:50.921371 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96","Type":"ContainerDied","Data":"e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222"} Mar 20 15:47:51 crc kubenswrapper[3552]: E0320 15:47:51.174681 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:47:51 crc kubenswrapper[3552]: E0320 15:47:51.175556 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running 
failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:47:51 crc kubenswrapper[3552]: E0320 15:47:51.176974 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:47:51 crc kubenswrapper[3552]: E0320 15:47:51.177018 3552 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" Mar 20 15:47:51 crc kubenswrapper[3552]: I0320 15:47:51.694593 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:47:52 crc kubenswrapper[3552]: I0320 15:47:52.958246 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": read tcp 10.217.0.2:54868->10.217.0.151:9322: read: connection reset by peer" Mar 20 15:47:53 crc kubenswrapper[3552]: I0320 15:47:53.952878 3552 generic.go:334] "Generic (PLEG): container finished" podID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerID="10d6ccd1a75dfc22b2fccce1394d49d48715b6f97d05b3077130d91a9ea66f69" exitCode=0 Mar 20 15:47:53 crc kubenswrapper[3552]: I0320 15:47:53.953224 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a","Type":"ContainerDied","Data":"10d6ccd1a75dfc22b2fccce1394d49d48715b6f97d05b3077130d91a9ea66f69"} Mar 20 15:47:56 crc kubenswrapper[3552]: E0320 15:47:56.174020 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:47:56 crc kubenswrapper[3552]: E0320 15:47:56.175571 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:47:56 crc kubenswrapper[3552]: E0320 15:47:56.175946 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:47:56 crc kubenswrapper[3552]: E0320 15:47:56.175982 3552 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" Mar 20 15:47:56 crc kubenswrapper[3552]: I0320 15:47:56.651378 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": dial tcp 10.217.0.151:9322: connect: connection refused" Mar 20 15:47:56 crc kubenswrapper[3552]: I0320 15:47:56.976854 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-q7n8s" event={"ID":"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4","Type":"ContainerDied","Data":"8a556bd8d42562dc702b4ceab30b14067afc9055b963c73f44ce0d5d6d5c5913"} Mar 20 15:47:56 crc kubenswrapper[3552]: I0320 15:47:56.976897 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a556bd8d42562dc702b4ceab30b14067afc9055b963c73f44ce0d5d6d5c5913" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.054974 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.152847 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-scripts\") pod \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.152932 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g46wh\" (UniqueName: \"kubernetes.io/projected/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-kube-api-access-g46wh\") pod \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.152985 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-config-data\") pod \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.153078 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-combined-ca-bundle\") pod \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.153103 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-credential-keys\") pod \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 
15:47:57.153158 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-fernet-keys\") pod \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\" (UID: \"4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4\") " Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.158621 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" (UID: "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.158801 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-kube-api-access-g46wh" (OuterVolumeSpecName: "kube-api-access-g46wh") pod "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" (UID: "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4"). InnerVolumeSpecName "kube-api-access-g46wh". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.159250 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" (UID: "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.159907 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-scripts" (OuterVolumeSpecName: "scripts") pod "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" (UID: "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.178259 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-config-data" (OuterVolumeSpecName: "config-data") pod "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" (UID: "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.188589 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" (UID: "4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.254766 3552 reconciler_common.go:300] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-fernet-keys\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.254808 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.254820 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-g46wh\" (UniqueName: \"kubernetes.io/projected/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-kube-api-access-g46wh\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.254830 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.254839 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.254848 3552 reconciler_common.go:300] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4-credential-keys\") on node \"crc\" DevicePath \"\"" Mar 20 15:47:57 crc kubenswrapper[3552]: I0320 15:47:57.986534 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-q7n8s" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.182293 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-q7n8s"] Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.198815 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-q7n8s"] Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.220094 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-l664w"] Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.220275 3552 topology_manager.go:215] "Topology Admit Handler" podUID="9dfce561-7659-4b99-8f83-02573b343f5e" podNamespace="openstack" podName="keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: E0320 15:47:58.220513 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" containerName="keystone-bootstrap" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.220528 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" containerName="keystone-bootstrap" Mar 20 15:47:58 crc kubenswrapper[3552]: E0320 15:47:58.220595 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0b6a467f-e04d-436e-ba50-72789f592266" containerName="init" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.220604 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b6a467f-e04d-436e-ba50-72789f592266" containerName="init" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.220771 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" containerName="keystone-bootstrap" Mar 20 15:47:58 crc 
kubenswrapper[3552]: I0320 15:47:58.220790 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b6a467f-e04d-436e-ba50-72789f592266" containerName="init" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.221342 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.225117 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tt8n9" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.225549 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.225811 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.226512 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.226879 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.235727 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l664w"] Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.376416 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-scripts\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.376473 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm5zm\" (UniqueName: \"kubernetes.io/projected/9dfce561-7659-4b99-8f83-02573b343f5e-kube-api-access-xm5zm\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.376527 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-combined-ca-bundle\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.376566 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-fernet-keys\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.376609 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-config-data\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.376649 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-credential-keys\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.479295 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-combined-ca-bundle\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.480626 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-fernet-keys\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.480790 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-config-data\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.480909 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-credential-keys\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.481127 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-scripts\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.481206 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xm5zm\" (UniqueName: \"kubernetes.io/projected/9dfce561-7659-4b99-8f83-02573b343f5e-kube-api-access-xm5zm\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.492781 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-fernet-keys\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.498091 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-combined-ca-bundle\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.498298 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-scripts\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " 
pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.499101 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-config-data\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.504084 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-credential-keys\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.507325 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm5zm\" (UniqueName: \"kubernetes.io/projected/9dfce561-7659-4b99-8f83-02573b343f5e-kube-api-access-xm5zm\") pod \"keystone-bootstrap-l664w\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:58 crc kubenswrapper[3552]: I0320 15:47:58.547091 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l664w" Mar 20 15:47:59 crc kubenswrapper[3552]: I0320 15:47:59.447324 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4" path="/var/lib/kubelet/pods/4d19b7e3-95b9-4c37-bdbd-dcb7e922f4d4/volumes" Mar 20 15:48:01 crc kubenswrapper[3552]: E0320 15:48:01.174358 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:48:01 crc kubenswrapper[3552]: E0320 15:48:01.175704 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:48:01 crc kubenswrapper[3552]: E0320 15:48:01.176210 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Mar 20 15:48:01 crc kubenswrapper[3552]: E0320 15:48:01.176245 3552 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.304235 3552 kubelet_getters.go:187] "Pod 
status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.304302 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.304352 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.304376 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.304417 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.650781 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": dial tcp 10.217.0.151:9322: connect: connection refused" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.966342 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:01 crc kubenswrapper[3552]: I0320 15:48:01.988881 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.015095 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048286 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-config-data\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048456 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048486 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-combined-ca-bundle\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048524 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-logs\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048555 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-scripts\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048590 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-kjlqm\" (UniqueName: \"kubernetes.io/projected/7b93e070-b03a-4c4d-8190-af41fd5b705e-kube-api-access-kjlqm\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048679 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-httpd-run\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.048733 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-internal-tls-certs\") pod \"7b93e070-b03a-4c4d-8190-af41fd5b705e\" (UID: \"7b93e070-b03a-4c4d-8190-af41fd5b705e\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.051803 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-logs" (OuterVolumeSpecName: "logs") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.053032 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.080197 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-scripts" (OuterVolumeSpecName: "scripts") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.091682 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.093252 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b93e070-b03a-4c4d-8190-af41fd5b705e-kube-api-access-kjlqm" (OuterVolumeSpecName: "kube-api-access-kjlqm") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "kube-api-access-kjlqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.093475 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96","Type":"ContainerDied","Data":"0b769ac80af95b0a1fbf172d42490db8a9f784d88d44866e1470f65d5ba59eeb"} Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.093506 3552 scope.go:117] "RemoveContainer" containerID="e3cffa640f070577daaa7c2e80e61ee458cce44ec332d060c8e8d5e32ac2b222" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.093521 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.108157 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"af48c0e5-6bff-4868-b778-c8724a3b2e68","Type":"ContainerDied","Data":"0675099c089093bfe6b61a9e86f9ca7ac55f399f29916194de33e19135f45832"} Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.108232 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.110509 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b93e070-b03a-4c4d-8190-af41fd5b705e","Type":"ContainerDied","Data":"bb09728334b1db19b3fb35ba0510732d594b193eb08c41dbac39ee335c263c1d"} Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.110574 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155054 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-logs\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155150 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vv24z\" (UniqueName: \"kubernetes.io/projected/af48c0e5-6bff-4868-b778-c8724a3b2e68-kube-api-access-vv24z\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155197 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-logs\") pod \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155234 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z949q\" (UniqueName: \"kubernetes.io/projected/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-kube-api-access-z949q\") pod \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155309 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-scripts\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155336 3552 
reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-config-data\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155383 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155477 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-combined-ca-bundle\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155551 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-httpd-run\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155581 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-config-data\") pod \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155665 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-public-tls-certs\") pod \"af48c0e5-6bff-4868-b778-c8724a3b2e68\" (UID: \"af48c0e5-6bff-4868-b778-c8724a3b2e68\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.155703 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-combined-ca-bundle\") pod \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\" (UID: \"ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96\") " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.156097 3552 reconciler_common.go:300] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-httpd-run\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.156134 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.156149 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b93e070-b03a-4c4d-8190-af41fd5b705e-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.156161 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.156175 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kjlqm\" (UniqueName: 
\"kubernetes.io/projected/7b93e070-b03a-4c4d-8190-af41fd5b705e-kube-api-access-kjlqm\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.163925 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-logs" (OuterVolumeSpecName: "logs") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.180829 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-logs" (OuterVolumeSpecName: "logs") pod "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" (UID: "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.185925 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.188597 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.193671 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af48c0e5-6bff-4868-b778-c8724a3b2e68-kube-api-access-vv24z" (OuterVolumeSpecName: "kube-api-access-vv24z") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "kube-api-access-vv24z". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.195934 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.217615 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-scripts" (OuterVolumeSpecName: "scripts") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.219613 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-kube-api-access-z949q" (OuterVolumeSpecName: "kube-api-access-z949q") pod "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" (UID: "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96"). InnerVolumeSpecName "kube-api-access-z949q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.224592 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-config-data" (OuterVolumeSpecName: "config-data") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.227207 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.246852 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b93e070-b03a-4c4d-8190-af41fd5b705e" (UID: "7b93e070-b03a-4c4d-8190-af41fd5b705e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259184 3552 reconciler_common.go:300] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259223 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259236 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259246 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-vv24z\" (UniqueName: \"kubernetes.io/projected/af48c0e5-6bff-4868-b778-c8724a3b2e68-kube-api-access-vv24z\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259257 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259267 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-z949q\" (UniqueName: \"kubernetes.io/projected/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-kube-api-access-z949q\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259277 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259287 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259300 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b93e070-b03a-4c4d-8190-af41fd5b705e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc 
kubenswrapper[3552]: I0320 15:48:02.259337 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.259348 3552 reconciler_common.go:300] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/af48c0e5-6bff-4868-b778-c8724a3b2e68-httpd-run\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.263787 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" (UID: "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.267155 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.301340 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.313452 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-config-data" (OuterVolumeSpecName: "config-data") pod "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" (UID: "ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.323534 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-config-data" (OuterVolumeSpecName: "config-data") pod "af48c0e5-6bff-4868-b778-c8724a3b2e68" (UID: "af48c0e5-6bff-4868-b778-c8724a3b2e68"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.325853 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.360654 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.360689 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.360701 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.360712 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.360723 3552 reconciler_common.go:300] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/af48c0e5-6bff-4868-b778-c8724a3b2e68-public-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.360732 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.462533 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.491490 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.504507 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.522250 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.575153 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.598753 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.598927 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cef2d399-77a5-4f2f-ac1d-801e04745c2e" podNamespace="openstack" podName="watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: E0320 15:48:02.599166 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599181 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" Mar 20 15:48:02 crc kubenswrapper[3552]: E0320 15:48:02.599199 3552 cpu_manager.go:396] "RemoveStaleState: removing container" 
podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-log" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599205 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-log" Mar 20 15:48:02 crc kubenswrapper[3552]: E0320 15:48:02.599223 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-httpd" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599229 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-httpd" Mar 20 15:48:02 crc kubenswrapper[3552]: E0320 15:48:02.599237 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-log" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599243 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-log" Mar 20 15:48:02 crc kubenswrapper[3552]: E0320 15:48:02.599254 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-httpd" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599260 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-httpd" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599503 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-log" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599526 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-httpd" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599537 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" containerName="glance-httpd" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599549 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" containerName="glance-log" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.599592 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" containerName="watcher-applier" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.600291 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.608265 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.616531 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.631765 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"]
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.644508 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.644698 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" podNamespace="openstack" podName="glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.645985 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.648336 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.648636 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.648758 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.649845 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rwgjn"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.651441 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.671186 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.671377 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" podNamespace="openstack" podName="glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.672285 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef2d399-77a5-4f2f-ac1d-801e04745c2e-config-data\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.672332 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef2d399-77a5-4f2f-ac1d-801e04745c2e-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.672366 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cef2d399-77a5-4f2f-ac1d-801e04745c2e-logs\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.673003 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.676642 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.676864 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xth4s\" (UniqueName: \"kubernetes.io/projected/cef2d399-77a5-4f2f-ac1d-801e04745c2e-kube-api-access-xth4s\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.676868 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.684483 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.778626 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.778680 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhj84\" (UniqueName: \"kubernetes.io/projected/4a4b8a27-5d4a-44d2-8553-124cd38ec665-kube-api-access-hhj84\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.778708 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6bhq\" (UniqueName: \"kubernetes.io/projected/0d151a0e-e371-44af-a237-0d70a5876ace-kube-api-access-l6bhq\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.778728 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.778903 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-logs\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.779165 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xth4s\" (UniqueName: \"kubernetes.io/projected/cef2d399-77a5-4f2f-ac1d-801e04745c2e-kube-api-access-xth4s\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.779201 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.779227 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-logs\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.779257 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.779350 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.779710 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.782962 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783034 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783066 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783129 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783248 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783306 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef2d399-77a5-4f2f-ac1d-801e04745c2e-config-data\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783325 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef2d399-77a5-4f2f-ac1d-801e04745c2e-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783350 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cef2d399-77a5-4f2f-ac1d-801e04745c2e-logs\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.783378 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.784013 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cef2d399-77a5-4f2f-ac1d-801e04745c2e-logs\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.787877 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef2d399-77a5-4f2f-ac1d-801e04745c2e-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.788036 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef2d399-77a5-4f2f-ac1d-801e04745c2e-config-data\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.798843 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xth4s\" (UniqueName: \"kubernetes.io/projected/cef2d399-77a5-4f2f-ac1d-801e04745c2e-kube-api-access-xth4s\") pod \"watcher-applier-0\" (UID: \"cef2d399-77a5-4f2f-ac1d-801e04745c2e\") " pod="openstack/watcher-applier-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.884856 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") 
pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.884916 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.884939 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hhj84\" (UniqueName: \"kubernetes.io/projected/4a4b8a27-5d4a-44d2-8553-124cd38ec665-kube-api-access-hhj84\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885108 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l6bhq\" (UniqueName: \"kubernetes.io/projected/0d151a0e-e371-44af-a237-0d70a5876ace-kube-api-access-l6bhq\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885161 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885188 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-logs\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885245 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885466 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885517 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-logs\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885545 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885579 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885658 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885708 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885741 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885778 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885811 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885829 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-logs\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.885875 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.886041 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-logs\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0" Mar 20 15:48:02 crc kubenswrapper[3552]: 
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.886924 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.887175 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.902829 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.902937 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.903863 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.904075 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.907192 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.908753 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.910271 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.912672 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.917157 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhj84\" (UniqueName: \"kubernetes.io/projected/4a4b8a27-5d4a-44d2-8553-124cd38ec665-kube-api-access-hhj84\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.925151 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6bhq\" (UniqueName: \"kubernetes.io/projected/0d151a0e-e371-44af-a237-0d70a5876ace-kube-api-access-l6bhq\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.929048 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.935501 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.944145 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " pod="openstack/glance-default-external-api-0"
Mar 20 15:48:02 crc kubenswrapper[3552]: I0320 15:48:02.973340 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Mar 20 15:48:03 crc kubenswrapper[3552]: I0320 15:48:03.001971 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Mar 20 15:48:03 crc kubenswrapper[3552]: I0320 15:48:03.442181 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b93e070-b03a-4c4d-8190-af41fd5b705e" path="/var/lib/kubelet/pods/7b93e070-b03a-4c4d-8190-af41fd5b705e/volumes"
Mar 20 15:48:03 crc kubenswrapper[3552]: I0320 15:48:03.443172 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af48c0e5-6bff-4868-b778-c8724a3b2e68" path="/var/lib/kubelet/pods/af48c0e5-6bff-4868-b778-c8724a3b2e68/volumes"
Mar 20 15:48:03 crc kubenswrapper[3552]: I0320 15:48:03.444283 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96" path="/var/lib/kubelet/pods/ba0b3cf9-af42-4c03-a60b-f1cda0ad0d96/volumes"
Mar 20 15:48:06 crc kubenswrapper[3552]: E0320 15:48:06.316249 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Mar 20 15:48:06 crc kubenswrapper[3552]: E0320 15:48:06.318578 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Mar 20 15:48:06 crc kubenswrapper[3552]: E0320 15:48:06.320974 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Mar 20 15:48:06 crc kubenswrapper[3552]: E0320 15:48:06.321019 3552 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-decision-engine-0" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" containerName="watcher-decision-engine"
Mar 20 15:48:11 crc kubenswrapper[3552]: I0320 15:48:11.651554 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:48:12 crc kubenswrapper[3552]: I0320 15:48:12.939931 3552 scope.go:117] "RemoveContainer" containerID="2a57556032ddac3ae269ef2a041141651a3fdbfc908c1ed29f1dc5b88b560fcf"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.070906 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.176717 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klxr7\" (UniqueName: \"kubernetes.io/projected/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-kube-api-access-klxr7\") pod \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") "
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.176804 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-custom-prometheus-ca\") pod \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") "
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.176918 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-logs\") pod \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") "
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.176978 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-config-data\") pod \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") "
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.177036 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-combined-ca-bundle\") pod \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\" (UID: \"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a\") "
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.179717 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-logs" (OuterVolumeSpecName: "logs") pod "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" (UID: "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.183517 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-kube-api-access-klxr7" (OuterVolumeSpecName: "kube-api-access-klxr7") pod "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" (UID: "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a"). InnerVolumeSpecName "kube-api-access-klxr7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.209298 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a","Type":"ContainerDied","Data":"699358d3f203f0454e3c7e767b03220aa53ce32f270bf1a7ffa20e7c3344bbdf"}
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.209675 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.213001 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" (UID: "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.237094 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" (UID: "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.259791 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-config-data" (OuterVolumeSpecName: "config-data") pod "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" (UID: "4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.279211 3552 reconciler_common.go:300] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.279254 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-logs\") on node \"crc\" DevicePath \"\""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.279285 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.279300 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.279314 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-klxr7\" (UniqueName: \"kubernetes.io/projected/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a-kube-api-access-klxr7\") on node \"crc\" DevicePath \"\""
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.546901 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"]
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.565186 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"]
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.582741 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"]
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.582917 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c27910f4-0275-4126-8012-70aa6554456a" podNamespace="openstack" podName="watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: E0320 15:48:13.583181 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.583197 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api"
Mar 20 15:48:13 crc kubenswrapper[3552]: E0320 15:48:13.583218 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api-log"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.583224 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api-log"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.583388 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api-log"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.583422 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.584343 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.586921 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.598968 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.696429 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.696549 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.696627 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-config-data\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.696795 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x65pl\" (UniqueName: \"kubernetes.io/projected/c27910f4-0275-4126-8012-70aa6554456a-kube-api-access-x65pl\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.696913 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c27910f4-0275-4126-8012-70aa6554456a-logs\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.798429 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.798517 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0"
\"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.798571 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-config-data\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.798601 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-x65pl\" (UniqueName: \"kubernetes.io/projected/c27910f4-0275-4126-8012-70aa6554456a-kube-api-access-x65pl\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.798653 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c27910f4-0275-4126-8012-70aa6554456a-logs\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.799327 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c27910f4-0275-4126-8012-70aa6554456a-logs\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.804238 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-config-data\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.806877 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.815251 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.815389 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-x65pl\" (UniqueName: \"kubernetes.io/projected/c27910f4-0275-4126-8012-70aa6554456a-kube-api-access-x65pl\") pod \"watcher-api-0\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " pod="openstack/watcher-api-0" Mar 20 15:48:13 crc kubenswrapper[3552]: I0320 15:48:13.910987 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.219481 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" event={"ID":"533f846d-89ff-4b46-84b6-3cbc4f66a5f2","Type":"ContainerStarted","Data":"a7c5145287d9b656b6d0652cb00ce20aaa500bad96c83e2bba16c0fb72aa43a8"} Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.219781 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.240039 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" podStartSLOduration=33.239987011 podStartE2EDuration="33.239987011s" podCreationTimestamp="2026-03-20 15:47:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:14.235850051 +0000 UTC m=+1393.929546921" watchObservedRunningTime="2026-03-20 15:48:14.239987011 +0000 UTC m=+1393.933683841" Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.482370 3552 scope.go:117] "RemoveContainer" containerID="7c23aef4459fddbfe888c06adf6ba1d6cbdf8516d57c4820a5e18c217082fc2c" Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.784737 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l664w"] Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.846867 3552 scope.go:117] "RemoveContainer" containerID="334aa02a719bb889bf448c34f2a50274d3eb5ee97876b6a80ecbfb4d2cc98044" Mar 20 15:48:14 crc kubenswrapper[3552]: W0320 15:48:14.864022 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9dfce561_7659_4b99_8f83_02573b343f5e.slice/crio-64a66a5afdac3bbf655a82db907b6e04c7af0cced098e10c2aaa46e079adc7fe WatchSource:0}: Error finding container 64a66a5afdac3bbf655a82db907b6e04c7af0cced098e10c2aaa46e079adc7fe: Status 404 returned error can't find the container with id 64a66a5afdac3bbf655a82db907b6e04c7af0cced098e10c2aaa46e079adc7fe Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.873118 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.929736 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Mar 20 15:48:14 crc kubenswrapper[3552]: I0320 15:48:14.976708 3552 scope.go:117] "RemoveContainer" containerID="118baa0aaf9e13fbfb2f55f74cef0c0365f5cef8978e869b93aad36e5b5acb52" Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.069294 3552 scope.go:117] "RemoveContainer" containerID="10d6ccd1a75dfc22b2fccce1394d49d48715b6f97d05b3077130d91a9ea66f69" Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.110905 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.161672 3552 scope.go:117] "RemoveContainer" containerID="173d68c9761c3dd859af3ace1e5911468bcfd831a4529b339dde6f2f3115742b" Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.251520 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.266731 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 
15:48:15.293172 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5cf7d98dbf-jv84z" event={"ID":"451d55ac-e9ef-427a-aba6-b85a62a08e59","Type":"ContainerStarted","Data":"6c1e1e01ba6adee7fbd18035f095d777ff3a61806c29e3571d2b3d0b51f1da12"} Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.300224 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l664w" event={"ID":"9dfce561-7659-4b99-8f83-02573b343f5e","Type":"ContainerStarted","Data":"64a66a5afdac3bbf655a82db907b6e04c7af0cced098e10c2aaa46e079adc7fe"} Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.306363 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"cef2d399-77a5-4f2f-ac1d-801e04745c2e","Type":"ContainerStarted","Data":"9f3879f0ef6515b3842630a87306a81202b44ccd0a5a50d844bb23b132a9b9fb"} Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.309963 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c585b5b7c-lrxqx" event={"ID":"7c90e274-fdb6-4e4a-a3af-1b48027f429e","Type":"ContainerStarted","Data":"d7f89164ec91d06a70408407dbd90834709af88ed7e48495c533c51fc148c4d1"} Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.313540 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a4b8a27-5d4a-44d2-8553-124cd38ec665","Type":"ContainerStarted","Data":"34e646c1b17ccaafb75e346fe4fc6d4dc82d942e73a0716a4e85b6342cc658d7"} Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.316253 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vbvt2" event={"ID":"8ae12b08-a678-4f33-8b7c-d23c6aca08fe","Type":"ContainerStarted","Data":"f8494ea855028bd40e206f89694c62395409d373aa19726fdd6e6f3ee407ab65"} Mar 20 15:48:15 crc kubenswrapper[3552]: W0320 15:48:15.316385 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d151a0e_e371_44af_a237_0d70a5876ace.slice/crio-cc7c42c207d50a83c05b50f9504c0a6185c6727daee4cbdd5c6f2c5e5f6c1ef7 WatchSource:0}: Error finding container cc7c42c207d50a83c05b50f9504c0a6185c6727daee4cbdd5c6f2c5e5f6c1ef7: Status 404 returned error can't find the container with id cc7c42c207d50a83c05b50f9504c0a6185c6727daee4cbdd5c6f2c5e5f6c1ef7 Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.346768 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/placement-db-sync-vbvt2" podStartSLOduration=5.402897532 podStartE2EDuration="39.346710541s" podCreationTimestamp="2026-03-20 15:47:36 +0000 UTC" firstStartedPulling="2026-03-20 15:47:39.016627537 +0000 UTC m=+1358.710324367" lastFinishedPulling="2026-03-20 15:48:12.960440536 +0000 UTC m=+1392.654137376" observedRunningTime="2026-03-20 15:48:15.333627152 +0000 UTC m=+1395.027324002" watchObservedRunningTime="2026-03-20 15:48:15.346710541 +0000 UTC m=+1395.040407381" Mar 20 15:48:15 crc kubenswrapper[3552]: I0320 15:48:15.556137 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" path="/var/lib/kubelet/pods/4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a/volumes" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.376439 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z758k" event={"ID":"739e2036-5958-4ee3-9fe3-4734696fdc6a","Type":"ContainerStarted","Data":"60fd76bb1637a9197587ef589c085e368feed4145a6273d9e562f9fcd4d7a941"} Mar 20 15:48:16 
crc kubenswrapper[3552]: I0320 15:48:16.407134 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"cef2d399-77a5-4f2f-ac1d-801e04745c2e","Type":"ContainerStarted","Data":"7f2ef4d58f363b76b49d00c5972dd6a6cff70fb69f5433a1fe8adc7ed49bf33f"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.409241 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/barbican-db-sync-z758k" podStartSLOduration=6.024204735 podStartE2EDuration="41.40919721s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="2026-03-20 15:47:38.942321818 +0000 UTC m=+1358.636018638" lastFinishedPulling="2026-03-20 15:48:14.327314283 +0000 UTC m=+1394.021011113" observedRunningTime="2026-03-20 15:48:16.407621538 +0000 UTC m=+1396.101318368" watchObservedRunningTime="2026-03-20 15:48:16.40919721 +0000 UTC m=+1396.102894040" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.410629 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c585b5b7c-lrxqx" event={"ID":"7c90e274-fdb6-4e4a-a3af-1b48027f429e","Type":"ContainerStarted","Data":"bc3946586488c4ab10f05e60c67783bcc86ad32da7f7f8b7588c25f3b5bea593"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.410804 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-c585b5b7c-lrxqx" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon-log" containerID="cri-o://d7f89164ec91d06a70408407dbd90834709af88ed7e48495c533c51fc148c4d1" gracePeriod=30 Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.410904 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-c585b5b7c-lrxqx" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon" containerID="cri-o://bc3946586488c4ab10f05e60c67783bcc86ad32da7f7f8b7588c25f3b5bea593" gracePeriod=30 Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.442878 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-5fddf585b5-c58zk" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon-log" containerID="cri-o://d01d19fcbc1a03d2f56b9a99db7bda241210e995facfe5e1e8929b12d511fb2c" gracePeriod=30 Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.443054 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fddf585b5-c58zk" event={"ID":"30916098-d2f2-40d6-b7e8-e3c784f7169b","Type":"ContainerStarted","Data":"d01d19fcbc1a03d2f56b9a99db7bda241210e995facfe5e1e8929b12d511fb2c"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.443270 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fddf585b5-c58zk" event={"ID":"30916098-d2f2-40d6-b7e8-e3c784f7169b","Type":"ContainerStarted","Data":"188de2be36b062b71fd3a820aaf340a68057bb9695648564affc73b6b662a719"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.443089 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-5fddf585b5-c58zk" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon" containerID="cri-o://188de2be36b062b71fd3a820aaf340a68057bb9695648564affc73b6b662a719" gracePeriod=30 Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.444253 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=14.444222945 podStartE2EDuration="14.444222945s" podCreationTimestamp="2026-03-20 15:48:02 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:16.439866979 +0000 UTC m=+1396.133563809" watchObservedRunningTime="2026-03-20 15:48:16.444222945 +0000 UTC m=+1396.137919775" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.462358 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dncfs" event={"ID":"625d76ad-7531-4be1-ab6d-769f15e6a7e5","Type":"ContainerStarted","Data":"8ae763f3fbcc142bbe20ff962a7dae27043bae90a1b8f2a09bb3e5af65f18c80"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.485826 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/horizon-c585b5b7c-lrxqx" podStartSLOduration=5.154996546 podStartE2EDuration="41.485782335s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="2026-03-20 15:47:38.017938436 +0000 UTC m=+1357.711635256" lastFinishedPulling="2026-03-20 15:48:14.348724215 +0000 UTC m=+1394.042421045" observedRunningTime="2026-03-20 15:48:16.462653038 +0000 UTC m=+1396.156349878" watchObservedRunningTime="2026-03-20 15:48:16.485782335 +0000 UTC m=+1396.179479165" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.486543 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-9c7459748-mvczs" event={"ID":"e30af56a-3534-429c-bbe2-3014515d530f","Type":"ContainerStarted","Data":"5f7e744438b945fa97f98e8186a5f3781d739c036e2ecd701176f3d695d09999"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.492781 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/horizon-5fddf585b5-c58zk" podStartSLOduration=6.587571195 podStartE2EDuration="40.492723931s" podCreationTimestamp="2026-03-20 15:47:36 +0000 UTC" firstStartedPulling="2026-03-20 15:47:40.393874962 +0000 UTC m=+1360.087571792" lastFinishedPulling="2026-03-20 15:48:14.299027658 +0000 UTC m=+1393.992724528" observedRunningTime="2026-03-20 15:48:16.48782313 +0000 UTC m=+1396.181519960" watchObservedRunningTime="2026-03-20 15:48:16.492723931 +0000 UTC m=+1396.186420761" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.501108 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0d151a0e-e371-44af-a237-0d70a5876ace","Type":"ContainerStarted","Data":"cc7c42c207d50a83c05b50f9504c0a6185c6727daee4cbdd5c6f2c5e5f6c1ef7"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.547724 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5cf7d98dbf-jv84z" event={"ID":"451d55ac-e9ef-427a-aba6-b85a62a08e59","Type":"ContainerStarted","Data":"cf8852c52014a54cbdb5e746421b4f3503ddcbe1740912196f825aea876b1e3b"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.547910 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-5cf7d98dbf-jv84z" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon-log" containerID="cri-o://6c1e1e01ba6adee7fbd18035f095d777ff3a61806c29e3571d2b3d0b51f1da12" gracePeriod=30 Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.548398 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-5cf7d98dbf-jv84z" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon" containerID="cri-o://cf8852c52014a54cbdb5e746421b4f3503ddcbe1740912196f825aea876b1e3b" gracePeriod=30 Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.554131 3552 
pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/horizon-9c7459748-mvczs" podStartSLOduration=3.906707246 podStartE2EDuration="31.55408152s" podCreationTimestamp="2026-03-20 15:47:45 +0000 UTC" firstStartedPulling="2026-03-20 15:47:46.651691505 +0000 UTC m=+1366.345388335" lastFinishedPulling="2026-03-20 15:48:14.299065779 +0000 UTC m=+1393.992762609" observedRunningTime="2026-03-20 15:48:16.546139128 +0000 UTC m=+1396.239835968" watchObservedRunningTime="2026-03-20 15:48:16.55408152 +0000 UTC m=+1396.247778350" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.557314 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/cinder-db-sync-dncfs" podStartSLOduration=4.980579231 podStartE2EDuration="41.557269785s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="2026-03-20 15:47:38.084734043 +0000 UTC m=+1357.778430873" lastFinishedPulling="2026-03-20 15:48:14.661424587 +0000 UTC m=+1394.355121427" observedRunningTime="2026-03-20 15:48:16.516725962 +0000 UTC m=+1396.210422802" watchObservedRunningTime="2026-03-20 15:48:16.557269785 +0000 UTC m=+1396.250966615" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.569466 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f9cbcd486-gj8tz" event={"ID":"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a","Type":"ContainerStarted","Data":"3f4231a6bdea50fa6b04a73dff18ca3f8de0f2b3d05f0819f7ee3850852822ca"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.597756 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/horizon-5cf7d98dbf-jv84z" podStartSLOduration=4.704278003 podStartE2EDuration="38.597714915s" podCreationTimestamp="2026-03-20 15:47:38 +0000 UTC" firstStartedPulling="2026-03-20 15:47:40.420494064 +0000 UTC m=+1360.114190894" lastFinishedPulling="2026-03-20 15:48:14.313930936 +0000 UTC m=+1394.007627806" observedRunningTime="2026-03-20 15:48:16.580046003 +0000 UTC m=+1396.273742843" watchObservedRunningTime="2026-03-20 15:48:16.597714915 +0000 UTC m=+1396.291411745" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.604953 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l664w" event={"ID":"9dfce561-7659-4b99-8f83-02573b343f5e","Type":"ContainerStarted","Data":"593e5c4d98df339765f31ca5e81de748b1d378067e8cb0190f51b4c70ac56db9"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.606239 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/horizon-6f9cbcd486-gj8tz" podStartSLOduration=6.068854416 podStartE2EDuration="32.606202142s" podCreationTimestamp="2026-03-20 15:47:44 +0000 UTC" firstStartedPulling="2026-03-20 15:47:46.462901853 +0000 UTC m=+1366.156598683" lastFinishedPulling="2026-03-20 15:48:13.000249579 +0000 UTC m=+1392.693946409" observedRunningTime="2026-03-20 15:48:16.598987299 +0000 UTC m=+1396.292684149" watchObservedRunningTime="2026-03-20 15:48:16.606202142 +0000 UTC m=+1396.299898972" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.609972 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"c27910f4-0275-4126-8012-70aa6554456a","Type":"ContainerStarted","Data":"3c3e1dbca9ff8a200091d2a0481f5a0fc15570ab3e040a7a355f4b07cac723e7"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.610009 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" 
event={"ID":"c27910f4-0275-4126-8012-70aa6554456a","Type":"ContainerStarted","Data":"a2871cde31ca12f188300e8a3162877e9b50fbe36ebf445ff3386e97e2585f99"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.611500 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerStarted","Data":"48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728"} Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.637373 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/keystone-bootstrap-l664w" podStartSLOduration=18.637330693 podStartE2EDuration="18.637330693s" podCreationTimestamp="2026-03-20 15:47:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:16.626848943 +0000 UTC m=+1396.320545803" watchObservedRunningTime="2026-03-20 15:48:16.637330693 +0000 UTC m=+1396.331027523" Mar 20 15:48:16 crc kubenswrapper[3552]: I0320 15:48:16.653718 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="4f6f7ad8-3778-4a4d-bee7-eeeb14b9b48a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.151:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.358692 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.659492 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-9c7459748-mvczs" event={"ID":"e30af56a-3534-429c-bbe2-3014515d530f","Type":"ContainerStarted","Data":"ff7c18cc6227bae876863a3cc826975ee115003cf68734f69b0023840395a95d"} Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.662439 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a4b8a27-5d4a-44d2-8553-124cd38ec665","Type":"ContainerStarted","Data":"9b723fd8ce90c7dfda83c7ec544843f705baf74280b9dc2949273e67b902b3de"} Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.663873 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f9cbcd486-gj8tz" event={"ID":"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a","Type":"ContainerStarted","Data":"75c1164319928057684acf21c1fff47d01d37d6c03dc88c0b8893dd430f2ca09"} Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.665533 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"c27910f4-0275-4126-8012-70aa6554456a","Type":"ContainerStarted","Data":"67925bdd26d95e0250c3a71f1dd748e9a5e63befcf3c57f21eb83d2f95eb29b8"} Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.667333 3552 generic.go:334] "Generic (PLEG): container finished" podID="55759243-c923-4bbc-8693-f0c35e30f6a1" containerID="c75f2060be9c78c13e97a92cd5fce4452ab57bb77e0480c2be041bbe17f45e04" exitCode=0 Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.667486 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mrt5x" event={"ID":"55759243-c923-4bbc-8693-f0c35e30f6a1","Type":"ContainerDied","Data":"c75f2060be9c78c13e97a92cd5fce4452ab57bb77e0480c2be041bbe17f45e04"} Mar 20 15:48:17 crc kubenswrapper[3552]: I0320 15:48:17.930499 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Mar 20 15:48:18 crc 
kubenswrapper[3552]: I0320 15:48:18.677586 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0d151a0e-e371-44af-a237-0d70a5876ace","Type":"ContainerStarted","Data":"8644ecfa8b1ecf6234deef254f61f78ad7a80a11d4e2ae93140b9aa6c981f7bc"} Mar 20 15:48:18 crc kubenswrapper[3552]: I0320 15:48:18.713213 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:48:18 crc kubenswrapper[3552]: I0320 15:48:18.719618 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=5.719574938 podStartE2EDuration="5.719574938s" podCreationTimestamp="2026-03-20 15:48:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:18.712920871 +0000 UTC m=+1398.406617711" watchObservedRunningTime="2026-03-20 15:48:18.719574938 +0000 UTC m=+1398.413271768" Mar 20 15:48:18 crc kubenswrapper[3552]: I0320 15:48:18.911529 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Mar 20 15:48:18 crc kubenswrapper[3552]: I0320 15:48:18.911876 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.513699 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.551215 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-combined-ca-bundle\") pod \"55759243-c923-4bbc-8693-f0c35e30f6a1\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.551302 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ndk7\" (UniqueName: \"kubernetes.io/projected/55759243-c923-4bbc-8693-f0c35e30f6a1-kube-api-access-8ndk7\") pod \"55759243-c923-4bbc-8693-f0c35e30f6a1\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.551513 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-config\") pod \"55759243-c923-4bbc-8693-f0c35e30f6a1\" (UID: \"55759243-c923-4bbc-8693-f0c35e30f6a1\") " Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.588613 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55759243-c923-4bbc-8693-f0c35e30f6a1-kube-api-access-8ndk7" (OuterVolumeSpecName: "kube-api-access-8ndk7") pod "55759243-c923-4bbc-8693-f0c35e30f6a1" (UID: "55759243-c923-4bbc-8693-f0c35e30f6a1"). InnerVolumeSpecName "kube-api-access-8ndk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.619198 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-config" (OuterVolumeSpecName: "config") pod "55759243-c923-4bbc-8693-f0c35e30f6a1" (UID: "55759243-c923-4bbc-8693-f0c35e30f6a1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.625778 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55759243-c923-4bbc-8693-f0c35e30f6a1" (UID: "55759243-c923-4bbc-8693-f0c35e30f6a1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.654368 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.654418 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55759243-c923-4bbc-8693-f0c35e30f6a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.654430 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-8ndk7\" (UniqueName: \"kubernetes.io/projected/55759243-c923-4bbc-8693-f0c35e30f6a1-kube-api-access-8ndk7\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.692991 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a4b8a27-5d4a-44d2-8553-124cd38ec665","Type":"ContainerStarted","Data":"5b287d58f9489ee09695211f3aaaa8e7ef67ef45fb8f2cc4dbdfeb21aa6999ad"} Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.703818 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mrt5x" event={"ID":"55759243-c923-4bbc-8693-f0c35e30f6a1","Type":"ContainerDied","Data":"24c13fcc82f178507fc7b22332e947c94de94340b054b67b1f0b31a5ab28d08c"} Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.704108 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24c13fcc82f178507fc7b22332e947c94de94340b054b67b1f0b31a5ab28d08c" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.704071 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-mrt5x" Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.723696 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b208453-e2bc-4dc0-b407-c652e393c13d" containerID="a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43" exitCode=137 Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.723778 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"7b208453-e2bc-4dc0-b407-c652e393c13d","Type":"ContainerDied","Data":"a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43"} Mar 20 15:48:19 crc kubenswrapper[3552]: I0320 15:48:19.729891 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.729853442 podStartE2EDuration="17.729853442s" podCreationTimestamp="2026-03-20 15:48:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:19.720816761 +0000 UTC m=+1399.414513601" watchObservedRunningTime="2026-03-20 15:48:19.729853442 +0000 UTC m=+1399.423550262" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.054599 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-599c89c5c9-zrtzk"] Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.054884 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerName="dnsmasq-dns" containerID="cri-o://a7c5145287d9b656b6d0652cb00ce20aaa500bad96c83e2bba16c0fb72aa43a8" gracePeriod=10 Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.071927 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.131570 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fb5d4fb47-48ntc"] Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.132011 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" podNamespace="openstack" podName="dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: E0320 15:48:20.132273 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="55759243-c923-4bbc-8693-f0c35e30f6a1" containerName="neutron-db-sync" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.132287 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="55759243-c923-4bbc-8693-f0c35e30f6a1" containerName="neutron-db-sync" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.132476 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="55759243-c923-4bbc-8693-f0c35e30f6a1" containerName="neutron-db-sync" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.133540 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.156721 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fb5d4fb47-48ntc"] Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.258446 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-66bd8f794-2dbwx"] Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.258621 3552 topology_manager.go:215] "Topology Admit Handler" podUID="14da306a-fa6c-460e-af02-1180237b4366" podNamespace="openstack" podName="neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.260569 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.272389 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-66bd8f794-2dbwx"] Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.302863 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.303094 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mpcl4" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.303205 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.303314 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307357 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-config\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307424 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-swift-storage-0\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307451 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-svc\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307473 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgrfh\" (UniqueName: \"kubernetes.io/projected/8a910859-d35b-4395-a991-3d073b07f9e2-kube-api-access-dgrfh\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307502 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-combined-ca-bundle\") pod \"neutron-66bd8f794-2dbwx\" (UID: 
\"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307526 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-config\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307562 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-sb\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307589 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-nb\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307611 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9d7h\" (UniqueName: \"kubernetes.io/projected/14da306a-fa6c-460e-af02-1180237b4366-kube-api-access-f9d7h\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307637 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-httpd-config\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.307662 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-ovndb-tls-certs\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.312543 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.429695 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b208453-e2bc-4dc0-b407-c652e393c13d-logs\") pod \"7b208453-e2bc-4dc0-b407-c652e393c13d\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.429850 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-combined-ca-bundle\") pod \"7b208453-e2bc-4dc0-b407-c652e393c13d\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.429925 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrn92\" (UniqueName: \"kubernetes.io/projected/7b208453-e2bc-4dc0-b407-c652e393c13d-kube-api-access-jrn92\") pod \"7b208453-e2bc-4dc0-b407-c652e393c13d\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.429960 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-config-data\") pod \"7b208453-e2bc-4dc0-b407-c652e393c13d\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.429986 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-custom-prometheus-ca\") pod \"7b208453-e2bc-4dc0-b407-c652e393c13d\" (UID: \"7b208453-e2bc-4dc0-b407-c652e393c13d\") " Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430131 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-config\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430163 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-swift-storage-0\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430198 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-svc\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430223 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dgrfh\" (UniqueName: \"kubernetes.io/projected/8a910859-d35b-4395-a991-3d073b07f9e2-kube-api-access-dgrfh\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430255 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-combined-ca-bundle\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430277 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-config\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430304 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-sb\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430331 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-nb\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430353 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f9d7h\" (UniqueName: \"kubernetes.io/projected/14da306a-fa6c-460e-af02-1180237b4366-kube-api-access-f9d7h\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430386 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-httpd-config\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.430429 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-ovndb-tls-certs\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.432045 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-svc\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.467746 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-sb\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.469787 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b208453-e2bc-4dc0-b407-c652e393c13d-logs" (OuterVolumeSpecName: "logs") pod "7b208453-e2bc-4dc0-b407-c652e393c13d" (UID: 
"7b208453-e2bc-4dc0-b407-c652e393c13d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.507676 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b208453-e2bc-4dc0-b407-c652e393c13d-kube-api-access-jrn92" (OuterVolumeSpecName: "kube-api-access-jrn92") pod "7b208453-e2bc-4dc0-b407-c652e393c13d" (UID: "7b208453-e2bc-4dc0-b407-c652e393c13d"). InnerVolumeSpecName "kube-api-access-jrn92". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.520533 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-ovndb-tls-certs\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.521144 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-nb\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.521262 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-config\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.523118 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-combined-ca-bundle\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.533716 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-httpd-config\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.544945 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-config\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.545734 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-swift-storage-0\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.549256 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b208453-e2bc-4dc0-b407-c652e393c13d-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.549289 3552 reconciler_common.go:300] "Volume detached for 
volume \"kube-api-access-jrn92\" (UniqueName: \"kubernetes.io/projected/7b208453-e2bc-4dc0-b407-c652e393c13d-kube-api-access-jrn92\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.549515 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgrfh\" (UniqueName: \"kubernetes.io/projected/8a910859-d35b-4395-a991-3d073b07f9e2-kube-api-access-dgrfh\") pod \"dnsmasq-dns-7fb5d4fb47-48ntc\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.559125 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9d7h\" (UniqueName: \"kubernetes.io/projected/14da306a-fa6c-460e-af02-1180237b4366-kube-api-access-f9d7h\") pod \"neutron-66bd8f794-2dbwx\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.589523 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "7b208453-e2bc-4dc0-b407-c652e393c13d" (UID: "7b208453-e2bc-4dc0-b407-c652e393c13d"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.655668 3552 reconciler_common.go:300] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.658248 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.690223 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b208453-e2bc-4dc0-b407-c652e393c13d" (UID: "7b208453-e2bc-4dc0-b407-c652e393c13d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.752749 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-config-data" (OuterVolumeSpecName: "config-data") pod "7b208453-e2bc-4dc0-b407-c652e393c13d" (UID: "7b208453-e2bc-4dc0-b407-c652e393c13d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.758279 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.758318 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b208453-e2bc-4dc0-b407-c652e393c13d-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.771133 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.772623 3552 generic.go:334] "Generic (PLEG): container finished" podID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerID="a7c5145287d9b656b6d0652cb00ce20aaa500bad96c83e2bba16c0fb72aa43a8" exitCode=0 Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.772693 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" event={"ID":"533f846d-89ff-4b46-84b6-3cbc4f66a5f2","Type":"ContainerDied","Data":"a7c5145287d9b656b6d0652cb00ce20aaa500bad96c83e2bba16c0fb72aa43a8"} Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.805528 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.805764 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"7b208453-e2bc-4dc0-b407-c652e393c13d","Type":"ContainerDied","Data":"083fc75dd19dcbeb01cd87010135c6bc9231f85f8ca52d06d8fd26e75cc1958f"} Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.805795 3552 scope.go:117] "RemoveContainer" containerID="a973e99a2043feda5223f3763362b9e0bca2e905cb9243aea7287391aced8e43" Mar 20 15:48:20 crc kubenswrapper[3552]: I0320 15:48:20.820230 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:20.977428 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:20.981875 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.012511 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.024803 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.024997 3552 topology_manager.go:215] "Topology Admit Handler" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" podNamespace="openstack" podName="watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: E0320 15:48:21.025344 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerName="init" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.025359 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerName="init" Mar 20 15:48:21 crc kubenswrapper[3552]: E0320 15:48:21.025382 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" containerName="watcher-decision-engine" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.025391 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" containerName="watcher-decision-engine" Mar 20 15:48:21 crc kubenswrapper[3552]: E0320 15:48:21.025449 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerName="dnsmasq-dns" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.025459 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerName="dnsmasq-dns" Mar 20 15:48:21 crc 
kubenswrapper[3552]: I0320 15:48:21.025662 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" containerName="dnsmasq-dns" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.025692 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" containerName="watcher-decision-engine" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.026434 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.029952 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.049471 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.064352 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fz8k8\" (UniqueName: \"kubernetes.io/projected/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-kube-api-access-fz8k8\") pod \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.064433 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-nb\") pod \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.064491 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-swift-storage-0\") pod \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.064609 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-sb\") pod \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.064635 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-config\") pod \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.064828 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-svc\") pod \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\" (UID: \"533f846d-89ff-4b46-84b6-3cbc4f66a5f2\") " Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.095946 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-kube-api-access-fz8k8" (OuterVolumeSpecName: "kube-api-access-fz8k8") pod "533f846d-89ff-4b46-84b6-3cbc4f66a5f2" (UID: "533f846d-89ff-4b46-84b6-3cbc4f66a5f2"). InnerVolumeSpecName "kube-api-access-fz8k8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.156197 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-config" (OuterVolumeSpecName: "config") pod "533f846d-89ff-4b46-84b6-3cbc4f66a5f2" (UID: "533f846d-89ff-4b46-84b6-3cbc4f66a5f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169678 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfhxb\" (UniqueName: \"kubernetes.io/projected/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-kube-api-access-cfhxb\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169726 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-logs\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169777 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169825 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-config-data\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169854 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169900 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.169914 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-fz8k8\" (UniqueName: \"kubernetes.io/projected/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-kube-api-access-fz8k8\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.233045 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "533f846d-89ff-4b46-84b6-3cbc4f66a5f2" (UID: "533f846d-89ff-4b46-84b6-3cbc4f66a5f2"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.271544 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-cfhxb\" (UniqueName: \"kubernetes.io/projected/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-kube-api-access-cfhxb\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.271821 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-logs\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.271970 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.272087 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-config-data\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.272179 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.272293 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.275625 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-logs\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.283004 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "533f846d-89ff-4b46-84b6-3cbc4f66a5f2" (UID: "533f846d-89ff-4b46-84b6-3cbc4f66a5f2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.285093 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "533f846d-89ff-4b46-84b6-3cbc4f66a5f2" (UID: "533f846d-89ff-4b46-84b6-3cbc4f66a5f2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.292585 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.300442 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "533f846d-89ff-4b46-84b6-3cbc4f66a5f2" (UID: "533f846d-89ff-4b46-84b6-3cbc4f66a5f2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.302150 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.303881 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-config-data\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.303924 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfhxb\" (UniqueName: \"kubernetes.io/projected/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-kube-api-access-cfhxb\") pod \"watcher-decision-engine-0\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.374146 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.374180 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.374192 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/533f846d-89ff-4b46-84b6-3cbc4f66a5f2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.375820 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.443119 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b208453-e2bc-4dc0-b407-c652e393c13d" path="/var/lib/kubelet/pods/7b208453-e2bc-4dc0-b407-c652e393c13d/volumes" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.504178 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fb5d4fb47-48ntc"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.766234 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-66bd8f794-2dbwx"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.813082 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" event={"ID":"533f846d-89ff-4b46-84b6-3cbc4f66a5f2","Type":"ContainerDied","Data":"c680ab7234152d79f785f61d9906572f8a626ffb7544112a264f3ebc7b423100"} Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.813103 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-599c89c5c9-zrtzk" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.813128 3552 scope.go:117] "RemoveContainer" containerID="a7c5145287d9b656b6d0652cb00ce20aaa500bad96c83e2bba16c0fb72aa43a8" Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.852902 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-599c89c5c9-zrtzk"] Mar 20 15:48:21 crc kubenswrapper[3552]: I0320 15:48:21.864822 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-599c89c5c9-zrtzk"] Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.470135 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-bb7976659-5s6lm"] Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.470319 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" podNamespace="openstack" podName="neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.471658 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.473648 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.476835 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.485252 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-bb7976659-5s6lm"] Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605579 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvkt7\" (UniqueName: \"kubernetes.io/projected/4b220040-ff08-4478-8b58-bc3ccc670c86-kube-api-access-xvkt7\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605651 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-httpd-config\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605708 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-ovndb-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605743 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-combined-ca-bundle\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605836 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-config\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605864 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-internal-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.605904 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-public-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707065 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xvkt7\" (UniqueName: 
\"kubernetes.io/projected/4b220040-ff08-4478-8b58-bc3ccc670c86-kube-api-access-xvkt7\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707136 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-httpd-config\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707201 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-ovndb-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707237 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-combined-ca-bundle\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707329 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-config\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707355 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-internal-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.707417 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-public-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.712438 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-public-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.715423 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-ovndb-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.716139 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-combined-ca-bundle\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " 
pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.717668 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-config\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.757329 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-internal-tls-certs\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.757354 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-httpd-config\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.760582 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvkt7\" (UniqueName: \"kubernetes.io/projected/4b220040-ff08-4478-8b58-bc3ccc670c86-kube-api-access-xvkt7\") pod \"neutron-bb7976659-5s6lm\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.791767 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.930638 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.973682 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.977101 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Mar 20 15:48:22 crc kubenswrapper[3552]: I0320 15:48:22.985055 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.078451 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.104364 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.163066 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.503554 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="533f846d-89ff-4b46-84b6-3cbc4f66a5f2" path="/var/lib/kubelet/pods/533f846d-89ff-4b46-84b6-3cbc4f66a5f2/volumes" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.828336 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.828377 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Mar 
20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.884437 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.912500 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Mar 20 15:48:23 crc kubenswrapper[3552]: I0320 15:48:23.924060 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Mar 20 15:48:24 crc kubenswrapper[3552]: I0320 15:48:24.843778 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Mar 20 15:48:25 crc kubenswrapper[3552]: W0320 15:48:25.284541 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a910859_d35b_4395_a991_3d073b07f9e2.slice/crio-17ebec0f37f88894edad786985ff181a2016327aae9cb48ecd218b6d1aea2d3a WatchSource:0}: Error finding container 17ebec0f37f88894edad786985ff181a2016327aae9cb48ecd218b6d1aea2d3a: Status 404 returned error can't find the container with id 17ebec0f37f88894edad786985ff181a2016327aae9cb48ecd218b6d1aea2d3a Mar 20 15:48:25 crc kubenswrapper[3552]: W0320 15:48:25.285143 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14da306a_fa6c_460e_af02_1180237b4366.slice/crio-73650634b93f463b262573cce5e93045172c5c898c98d827b34e5e85afb10b93 WatchSource:0}: Error finding container 73650634b93f463b262573cce5e93045172c5c898c98d827b34e5e85afb10b93: Status 404 returned error can't find the container with id 73650634b93f463b262573cce5e93045172c5c898c98d827b34e5e85afb10b93 Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.304760 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.306421 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.313573 3552 scope.go:117] "RemoveContainer" containerID="db23c3b8ab92119d3b76ee65c27015ae2d67de92ce71fa38549c0d9bd6cdc961" Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.462548 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.464249 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.861111 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" event={"ID":"8a910859-d35b-4395-a991-3d073b07f9e2","Type":"ContainerStarted","Data":"17ebec0f37f88894edad786985ff181a2016327aae9cb48ecd218b6d1aea2d3a"} Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.886486 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerStarted","Data":"2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791"} Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.915560 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"0d151a0e-e371-44af-a237-0d70a5876ace","Type":"ContainerStarted","Data":"92375a2be83b6898fc8603064f22745660193225a002ac5103aa08281c6f908f"} Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.920806 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66bd8f794-2dbwx" event={"ID":"14da306a-fa6c-460e-af02-1180237b4366","Type":"ContainerStarted","Data":"73650634b93f463b262573cce5e93045172c5c898c98d827b34e5e85afb10b93"} Mar 20 15:48:25 crc kubenswrapper[3552]: I0320 15:48:25.958713 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=23.95867711 podStartE2EDuration="23.95867711s" podCreationTimestamp="2026-03-20 15:48:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:25.956875952 +0000 UTC m=+1405.650572792" watchObservedRunningTime="2026-03-20 15:48:25.95867711 +0000 UTC m=+1405.652373940" Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.159323 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.213288 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.335427 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-bb7976659-5s6lm"] Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.936763 3552 generic.go:334] "Generic (PLEG): container finished" podID="8a910859-d35b-4395-a991-3d073b07f9e2" containerID="c73775c4577ae22dc883186ebe48adc86671ffcc43571f66d3724b668f1584dc" exitCode=0 Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.936840 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" event={"ID":"8a910859-d35b-4395-a991-3d073b07f9e2","Type":"ContainerDied","Data":"c73775c4577ae22dc883186ebe48adc86671ffcc43571f66d3724b668f1584dc"} Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.939720 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66bd8f794-2dbwx" event={"ID":"14da306a-fa6c-460e-af02-1180237b4366","Type":"ContainerStarted","Data":"87ed38b7065d811248c493011799a72fd06e1ec3280d05d12fd19718f0afe37d"} Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.941216 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bb7976659-5s6lm" event={"ID":"4b220040-ff08-4478-8b58-bc3ccc670c86","Type":"ContainerStarted","Data":"811eff1f82a058833fc017ec49f079e854f8464ab3a9fce611fb2587e0824642"} Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.944356 3552 generic.go:334] "Generic (PLEG): container finished" podID="9dfce561-7659-4b99-8f83-02573b343f5e" containerID="593e5c4d98df339765f31ca5e81de748b1d378067e8cb0190f51b4c70ac56db9" exitCode=0 Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.944449 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l664w" event={"ID":"9dfce561-7659-4b99-8f83-02573b343f5e","Type":"ContainerDied","Data":"593e5c4d98df339765f31ca5e81de748b1d378067e8cb0190f51b4c70ac56db9"} Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.947967 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" 
event={"ID":"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1","Type":"ContainerStarted","Data":"d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36"} Mar 20 15:48:26 crc kubenswrapper[3552]: I0320 15:48:26.948006 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1","Type":"ContainerStarted","Data":"d1b0126f16122e2e91fed6fbd4282f3807557eaad107a59ffab1bd18b451ce86"} Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.005051 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66bd8f794-2dbwx" event={"ID":"14da306a-fa6c-460e-af02-1180237b4366","Type":"ContainerStarted","Data":"b756336336337216255047984af6d67759643f9b8364310287b066f2d973badc"} Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.007261 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.022153 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bb7976659-5s6lm" event={"ID":"4b220040-ff08-4478-8b58-bc3ccc670c86","Type":"ContainerStarted","Data":"f914ef14fce3809ef7373ba9424927ac2926dc300a9140af5c324c48646876a2"} Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.044480 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" event={"ID":"8a910859-d35b-4395-a991-3d073b07f9e2","Type":"ContainerStarted","Data":"b826c65c836ae95217d65ecde58da66d567aaa03ff58b9fb38948adbbc1a6f3d"} Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.045181 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-66bd8f794-2dbwx" podStartSLOduration=8.045133859 podStartE2EDuration="8.045133859s" podCreationTimestamp="2026-03-20 15:48:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:28.043284839 +0000 UTC m=+1407.736981679" watchObservedRunningTime="2026-03-20 15:48:28.045133859 +0000 UTC m=+1407.738830699" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.083288 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=8.080850783 podStartE2EDuration="8.080850783s" podCreationTimestamp="2026-03-20 15:48:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:28.063567681 +0000 UTC m=+1407.757264521" watchObservedRunningTime="2026-03-20 15:48:28.080850783 +0000 UTC m=+1407.774547613" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.112044 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" podStartSLOduration=8.111984554 podStartE2EDuration="8.111984554s" podCreationTimestamp="2026-03-20 15:48:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:28.097736654 +0000 UTC m=+1407.791433494" watchObservedRunningTime="2026-03-20 15:48:28.111984554 +0000 UTC m=+1407.805681384" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.830692 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l664w" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.884417 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm5zm\" (UniqueName: \"kubernetes.io/projected/9dfce561-7659-4b99-8f83-02573b343f5e-kube-api-access-xm5zm\") pod \"9dfce561-7659-4b99-8f83-02573b343f5e\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.884502 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-combined-ca-bundle\") pod \"9dfce561-7659-4b99-8f83-02573b343f5e\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.884572 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-scripts\") pod \"9dfce561-7659-4b99-8f83-02573b343f5e\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.884631 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-fernet-keys\") pod \"9dfce561-7659-4b99-8f83-02573b343f5e\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.884736 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-config-data\") pod \"9dfce561-7659-4b99-8f83-02573b343f5e\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.884765 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-credential-keys\") pod \"9dfce561-7659-4b99-8f83-02573b343f5e\" (UID: \"9dfce561-7659-4b99-8f83-02573b343f5e\") " Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.895751 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "9dfce561-7659-4b99-8f83-02573b343f5e" (UID: "9dfce561-7659-4b99-8f83-02573b343f5e"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.912526 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-scripts" (OuterVolumeSpecName: "scripts") pod "9dfce561-7659-4b99-8f83-02573b343f5e" (UID: "9dfce561-7659-4b99-8f83-02573b343f5e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.914531 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "9dfce561-7659-4b99-8f83-02573b343f5e" (UID: "9dfce561-7659-4b99-8f83-02573b343f5e"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.958573 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dfce561-7659-4b99-8f83-02573b343f5e-kube-api-access-xm5zm" (OuterVolumeSpecName: "kube-api-access-xm5zm") pod "9dfce561-7659-4b99-8f83-02573b343f5e" (UID: "9dfce561-7659-4b99-8f83-02573b343f5e"). InnerVolumeSpecName "kube-api-access-xm5zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.970320 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.970553 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api-log" containerID="cri-o://3c3e1dbca9ff8a200091d2a0481f5a0fc15570ab3e040a7a355f4b07cac723e7" gracePeriod=30 Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.971287 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api" containerID="cri-o://67925bdd26d95e0250c3a71f1dd748e9a5e63befcf3c57f21eb83d2f95eb29b8" gracePeriod=30 Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.994418 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.994461 3552 reconciler_common.go:300] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-fernet-keys\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.994471 3552 reconciler_common.go:300] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-credential-keys\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:28 crc kubenswrapper[3552]: I0320 15:48:28.994482 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-xm5zm\" (UniqueName: \"kubernetes.io/projected/9dfce561-7659-4b99-8f83-02573b343f5e-kube-api-access-xm5zm\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.011932 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-config-data" (OuterVolumeSpecName: "config-data") pod "9dfce561-7659-4b99-8f83-02573b343f5e" (UID: "9dfce561-7659-4b99-8f83-02573b343f5e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.047529 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9dfce561-7659-4b99-8f83-02573b343f5e" (UID: "9dfce561-7659-4b99-8f83-02573b343f5e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.092507 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l664w" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.092653 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l664w" event={"ID":"9dfce561-7659-4b99-8f83-02573b343f5e","Type":"ContainerDied","Data":"64a66a5afdac3bbf655a82db907b6e04c7af0cced098e10c2aaa46e079adc7fe"} Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.092696 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64a66a5afdac3bbf655a82db907b6e04c7af0cced098e10c2aaa46e079adc7fe" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.095258 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.100521 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.100808 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dfce561-7659-4b99-8f83-02573b343f5e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.548058 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-8487bc87b9-4thvb"] Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.565537 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7be78447-08e2-4e07-8b7b-f5faa5e093eb" podNamespace="openstack" podName="keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: E0320 15:48:29.565888 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="9dfce561-7659-4b99-8f83-02573b343f5e" containerName="keystone-bootstrap" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.565900 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dfce561-7659-4b99-8f83-02573b343f5e" containerName="keystone-bootstrap" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.566101 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dfce561-7659-4b99-8f83-02573b343f5e" containerName="keystone-bootstrap" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.566928 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.573510 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.573605 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.573656 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.573726 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.573883 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.574451 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tt8n9" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.578432 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8487bc87b9-4thvb"] Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.683698 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-fernet-keys\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684032 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-config-data\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684056 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-scripts\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684123 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-credential-keys\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684153 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp5vt\" (UniqueName: \"kubernetes.io/projected/7be78447-08e2-4e07-8b7b-f5faa5e093eb-kube-api-access-bp5vt\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684178 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-public-tls-certs\") pod \"keystone-8487bc87b9-4thvb\" (UID: 
\"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684209 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-combined-ca-bundle\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.684265 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-internal-tls-certs\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785416 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-internal-tls-certs\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785481 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-fernet-keys\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785520 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-config-data\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785539 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-scripts\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785589 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-credential-keys\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785617 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bp5vt\" (UniqueName: \"kubernetes.io/projected/7be78447-08e2-4e07-8b7b-f5faa5e093eb-kube-api-access-bp5vt\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785637 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-public-tls-certs\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " 
pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.785664 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-combined-ca-bundle\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.793641 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-fernet-keys\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.793908 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-internal-tls-certs\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.794102 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-scripts\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.795028 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-combined-ca-bundle\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.795894 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-public-tls-certs\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.796800 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-credential-keys\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.798684 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7be78447-08e2-4e07-8b7b-f5faa5e093eb-config-data\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:29 crc kubenswrapper[3552]: I0320 15:48:29.812118 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp5vt\" (UniqueName: \"kubernetes.io/projected/7be78447-08e2-4e07-8b7b-f5faa5e093eb-kube-api-access-bp5vt\") pod \"keystone-8487bc87b9-4thvb\" (UID: \"7be78447-08e2-4e07-8b7b-f5faa5e093eb\") " pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:30 crc kubenswrapper[3552]: I0320 15:48:30.025751 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:30 crc kubenswrapper[3552]: I0320 15:48:30.138320 3552 generic.go:334] "Generic (PLEG): container finished" podID="c27910f4-0275-4126-8012-70aa6554456a" containerID="3c3e1dbca9ff8a200091d2a0481f5a0fc15570ab3e040a7a355f4b07cac723e7" exitCode=143 Mar 20 15:48:30 crc kubenswrapper[3552]: I0320 15:48:30.138482 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"c27910f4-0275-4126-8012-70aa6554456a","Type":"ContainerDied","Data":"3c3e1dbca9ff8a200091d2a0481f5a0fc15570ab3e040a7a355f4b07cac723e7"} Mar 20 15:48:30 crc kubenswrapper[3552]: I0320 15:48:30.156580 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bb7976659-5s6lm" event={"ID":"4b220040-ff08-4478-8b58-bc3ccc670c86","Type":"ContainerStarted","Data":"2d0120992c2347bcb66495ba1126b75dcaeaf0922d715ab79159442ca390bcab"} Mar 20 15:48:30 crc kubenswrapper[3552]: I0320 15:48:30.350599 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8487bc87b9-4thvb"] Mar 20 15:48:31 crc kubenswrapper[3552]: I0320 15:48:31.163990 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8487bc87b9-4thvb" event={"ID":"7be78447-08e2-4e07-8b7b-f5faa5e093eb","Type":"ContainerStarted","Data":"d0186074c16423b86a16e0f52ea0d0433eb1bbe023d04f753008fdf05e6202da"} Mar 20 15:48:31 crc kubenswrapper[3552]: I0320 15:48:31.186638 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-bb7976659-5s6lm" podStartSLOduration=9.186595465 podStartE2EDuration="9.186595465s" podCreationTimestamp="2026-03-20 15:48:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:31.183591995 +0000 UTC m=+1410.877288825" watchObservedRunningTime="2026-03-20 15:48:31.186595465 +0000 UTC m=+1410.880292295" Mar 20 15:48:31 crc kubenswrapper[3552]: I0320 15:48:31.377150 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:31 crc kubenswrapper[3552]: I0320 15:48:31.617550 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:32 crc kubenswrapper[3552]: I0320 15:48:32.190024 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8487bc87b9-4thvb" event={"ID":"7be78447-08e2-4e07-8b7b-f5faa5e093eb","Type":"ContainerStarted","Data":"3896902b7f6f5a98eafcc20088ebee9b52a79699a93bd79cb34a04ac37043ac3"} Mar 20 15:48:32 crc kubenswrapper[3552]: I0320 15:48:32.190064 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:32 crc kubenswrapper[3552]: I0320 15:48:32.291954 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.002851 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.002927 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.002942 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Mar 20 
15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.002955 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.070411 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.076902 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.205124 3552 generic.go:334] "Generic (PLEG): container finished" podID="c27910f4-0275-4126-8012-70aa6554456a" containerID="67925bdd26d95e0250c3a71f1dd748e9a5e63befcf3c57f21eb83d2f95eb29b8" exitCode=0 Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.207930 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"c27910f4-0275-4126-8012-70aa6554456a","Type":"ContainerDied","Data":"67925bdd26d95e0250c3a71f1dd748e9a5e63befcf3c57f21eb83d2f95eb29b8"} Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.208081 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.225190 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/keystone-8487bc87b9-4thvb" podStartSLOduration=4.225147783 podStartE2EDuration="4.225147783s" podCreationTimestamp="2026-03-20 15:48:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:33.224779833 +0000 UTC m=+1412.918476683" watchObservedRunningTime="2026-03-20 15:48:33.225147783 +0000 UTC m=+1412.918844613" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.912469 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.166:9322/\": dial tcp 10.217.0.166:9322: connect: connection refused" Mar 20 15:48:33 crc kubenswrapper[3552]: I0320 15:48:33.912541 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.166:9322/\": dial tcp 10.217.0.166:9322: connect: connection refused" Mar 20 15:48:35 crc kubenswrapper[3552]: I0320 15:48:35.306678 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6f9cbcd486-gj8tz" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Mar 20 15:48:35 crc kubenswrapper[3552]: I0320 15:48:35.466567 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-9c7459748-mvczs" podUID="e30af56a-3534-429c-bbe2-3014515d530f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.161:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.161:8443: connect: connection refused" Mar 20 15:48:35 crc kubenswrapper[3552]: I0320 15:48:35.772621 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:48:35 crc kubenswrapper[3552]: I0320 15:48:35.842793 
3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d58c49d99-lznpf"] Mar 20 15:48:35 crc kubenswrapper[3552]: I0320 15:48:35.843263 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="dnsmasq-dns" containerID="cri-o://c8979f267bfa243f15c7fa737169c6c8237493db03fa21d44455302f3ce8b80e" gracePeriod=10 Mar 20 15:48:36 crc kubenswrapper[3552]: E0320 15:48:36.098251 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f102abe_1535_4233_ba10_e6ce2e4daa29.slice/crio-c8979f267bfa243f15c7fa737169c6c8237493db03fa21d44455302f3ce8b80e.scope\": RecentStats: unable to find data in memory cache]" Mar 20 15:48:36 crc kubenswrapper[3552]: I0320 15:48:36.227946 3552 generic.go:334] "Generic (PLEG): container finished" podID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerID="c8979f267bfa243f15c7fa737169c6c8237493db03fa21d44455302f3ce8b80e" exitCode=0 Mar 20 15:48:36 crc kubenswrapper[3552]: I0320 15:48:36.227986 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" event={"ID":"8f102abe-1535-4233-ba10-e6ce2e4daa29","Type":"ContainerDied","Data":"c8979f267bfa243f15c7fa737169c6c8237493db03fa21d44455302f3ce8b80e"} Mar 20 15:48:38 crc kubenswrapper[3552]: I0320 15:48:38.911780 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.166:9322/\": dial tcp 10.217.0.166:9322: connect: connection refused" Mar 20 15:48:38 crc kubenswrapper[3552]: I0320 15:48:38.912109 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.166:9322/\": dial tcp 10.217.0.166:9322: connect: connection refused" Mar 20 15:48:40 crc kubenswrapper[3552]: I0320 15:48:40.833558 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Mar 20 15:48:41 crc kubenswrapper[3552]: I0320 15:48:41.381130 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:41 crc kubenswrapper[3552]: I0320 15:48:41.381225 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:48:41 crc kubenswrapper[3552]: I0320 15:48:41.383189 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Mar 20 15:48:41 crc kubenswrapper[3552]: I0320 15:48:41.388695 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Mar 20 15:48:41 crc kubenswrapper[3552]: I0320 15:48:41.388793 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:48:41 crc kubenswrapper[3552]: I0320 15:48:41.393128 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.663711 3552 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.778732 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.778797 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.796396 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x65pl\" (UniqueName: \"kubernetes.io/projected/c27910f4-0275-4126-8012-70aa6554456a-kube-api-access-x65pl\") pod \"c27910f4-0275-4126-8012-70aa6554456a\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.796478 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c27910f4-0275-4126-8012-70aa6554456a-logs\") pod \"c27910f4-0275-4126-8012-70aa6554456a\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.796514 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-config-data\") pod \"c27910f4-0275-4126-8012-70aa6554456a\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.796536 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle\") pod \"c27910f4-0275-4126-8012-70aa6554456a\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.797028 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27910f4-0275-4126-8012-70aa6554456a-logs" (OuterVolumeSpecName: "logs") pod "c27910f4-0275-4126-8012-70aa6554456a" (UID: "c27910f4-0275-4126-8012-70aa6554456a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.797304 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-custom-prometheus-ca\") pod \"c27910f4-0275-4126-8012-70aa6554456a\" (UID: \"c27910f4-0275-4126-8012-70aa6554456a\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.797899 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c27910f4-0275-4126-8012-70aa6554456a-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.812619 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c27910f4-0275-4126-8012-70aa6554456a-kube-api-access-x65pl" (OuterVolumeSpecName: "kube-api-access-x65pl") pod "c27910f4-0275-4126-8012-70aa6554456a" (UID: "c27910f4-0275-4126-8012-70aa6554456a"). InnerVolumeSpecName "kube-api-access-x65pl". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.829370 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "c27910f4-0275-4126-8012-70aa6554456a" (UID: "c27910f4-0275-4126-8012-70aa6554456a"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.842171 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.858604 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c27910f4-0275-4126-8012-70aa6554456a" (UID: "c27910f4-0275-4126-8012-70aa6554456a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.886491 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-config-data" (OuterVolumeSpecName: "config-data") pod "c27910f4-0275-4126-8012-70aa6554456a" (UID: "c27910f4-0275-4126-8012-70aa6554456a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.899226 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-dns-svc\") pod \"8f102abe-1535-4233-ba10-e6ce2e4daa29\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.899477 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-sb\") pod \"8f102abe-1535-4233-ba10-e6ce2e4daa29\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.899571 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ktss\" (UniqueName: \"kubernetes.io/projected/8f102abe-1535-4233-ba10-e6ce2e4daa29-kube-api-access-8ktss\") pod \"8f102abe-1535-4233-ba10-e6ce2e4daa29\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.899685 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-nb\") pod \"8f102abe-1535-4233-ba10-e6ce2e4daa29\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.899724 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-config\") pod \"8f102abe-1535-4233-ba10-e6ce2e4daa29\" (UID: \"8f102abe-1535-4233-ba10-e6ce2e4daa29\") " Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.900218 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-x65pl\" (UniqueName: \"kubernetes.io/projected/c27910f4-0275-4126-8012-70aa6554456a-kube-api-access-x65pl\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.900238 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.900255 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.900265 3552 reconciler_common.go:300] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c27910f4-0275-4126-8012-70aa6554456a-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:42 crc kubenswrapper[3552]: I0320 15:48:42.965591 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f102abe-1535-4233-ba10-e6ce2e4daa29-kube-api-access-8ktss" (OuterVolumeSpecName: "kube-api-access-8ktss") pod "8f102abe-1535-4233-ba10-e6ce2e4daa29" (UID: "8f102abe-1535-4233-ba10-e6ce2e4daa29"). InnerVolumeSpecName "kube-api-access-8ktss". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.002184 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-8ktss\" (UniqueName: \"kubernetes.io/projected/8f102abe-1535-4233-ba10-e6ce2e4daa29-kube-api-access-8ktss\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.012889 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8f102abe-1535-4233-ba10-e6ce2e4daa29" (UID: "8f102abe-1535-4233-ba10-e6ce2e4daa29"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.017373 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8f102abe-1535-4233-ba10-e6ce2e4daa29" (UID: "8f102abe-1535-4233-ba10-e6ce2e4daa29"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.019049 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8f102abe-1535-4233-ba10-e6ce2e4daa29" (UID: "8f102abe-1535-4233-ba10-e6ce2e4daa29"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.021157 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-config" (OuterVolumeSpecName: "config") pod "8f102abe-1535-4233-ba10-e6ce2e4daa29" (UID: "8f102abe-1535-4233-ba10-e6ce2e4daa29"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.104875 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.105241 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.105257 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.105271 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f102abe-1535-4233-ba10-e6ce2e4daa29-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.307570 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"c27910f4-0275-4126-8012-70aa6554456a","Type":"ContainerDied","Data":"a2871cde31ca12f188300e8a3162877e9b50fbe36ebf445ff3386e97e2585f99"} Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.307879 3552 scope.go:117] "RemoveContainer" containerID="67925bdd26d95e0250c3a71f1dd748e9a5e63befcf3c57f21eb83d2f95eb29b8" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.308069 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.311023 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" event={"ID":"8f102abe-1535-4233-ba10-e6ce2e4daa29","Type":"ContainerDied","Data":"970f573e43c78ddf2b5f665667cfb9072cb18729d3dba783425406ad02525db2"} Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.311060 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d58c49d99-lznpf" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.368945 3552 scope.go:117] "RemoveContainer" containerID="3c3e1dbca9ff8a200091d2a0481f5a0fc15570ab3e040a7a355f4b07cac723e7" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.415800 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.429089 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.450708 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c27910f4-0275-4126-8012-70aa6554456a" path="/var/lib/kubelet/pods/c27910f4-0275-4126-8012-70aa6554456a/volumes" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.451396 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d58c49d99-lznpf"] Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.469993 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d58c49d99-lznpf"] Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508078 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508265 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5cbde9b3-0a58-4706-bb8c-5a8694ddb51c" podNamespace="openstack" podName="watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: E0320 15:48:43.508582 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="init" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508597 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="init" Mar 20 15:48:43 crc kubenswrapper[3552]: E0320 15:48:43.508610 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api-log" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508618 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api-log" Mar 20 15:48:43 crc kubenswrapper[3552]: E0320 15:48:43.508647 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="dnsmasq-dns" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508653 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="dnsmasq-dns" Mar 20 15:48:43 crc kubenswrapper[3552]: E0320 15:48:43.508667 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508673 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508958 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api-log" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508979 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27910f4-0275-4126-8012-70aa6554456a" containerName="watcher-api" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.508988 3552 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" containerName="dnsmasq-dns" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.512685 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.516119 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.516422 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.518823 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.518965 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.548842 3552 scope.go:117] "RemoveContainer" containerID="c8979f267bfa243f15c7fa737169c6c8237493db03fa21d44455302f3ce8b80e" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618689 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618759 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jqzq\" (UniqueName: \"kubernetes.io/projected/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-kube-api-access-7jqzq\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618798 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618825 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-config-data\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618854 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-public-tls-certs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618887 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.618910 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-logs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.720922 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.721881 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-7jqzq\" (UniqueName: \"kubernetes.io/projected/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-kube-api-access-7jqzq\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.722025 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.722067 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-config-data\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.722115 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-public-tls-certs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.722171 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.722196 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-logs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.746126 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.746291 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-public-tls-certs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.746394 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.746568 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-config-data\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.746676 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-logs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.747821 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.747931 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jqzq\" (UniqueName: \"kubernetes.io/projected/5cbde9b3-0a58-4706-bb8c-5a8694ddb51c-kube-api-access-7jqzq\") pod \"watcher-api-0\" (UID: \"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c\") " pod="openstack/watcher-api-0" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.768147 3552 scope.go:117] "RemoveContainer" containerID="b794f0b5d166ba4f465b9a326003f8e3d1ff9f6c1a8e7d2cfc504c8a39f60274" Mar 20 15:48:43 crc kubenswrapper[3552]: I0320 15:48:43.837385 3552 util.go:30] "No sandbox for pod can be found. 
Mar 20 15:48:44 crc kubenswrapper[3552]: I0320 15:48:44.238250 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Mar 20 15:48:44 crc kubenswrapper[3552]: W0320 15:48:44.242574 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5cbde9b3_0a58_4706_bb8c_5a8694ddb51c.slice/crio-106a52dc4760e2da2b74e9b34a2959c7ecda7a6bba643a8852339028deab4ded WatchSource:0}: Error finding container 106a52dc4760e2da2b74e9b34a2959c7ecda7a6bba643a8852339028deab4ded: Status 404 returned error can't find the container with id 106a52dc4760e2da2b74e9b34a2959c7ecda7a6bba643a8852339028deab4ded
Mar 20 15:48:44 crc kubenswrapper[3552]: I0320 15:48:44.327301 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c","Type":"ContainerStarted","Data":"106a52dc4760e2da2b74e9b34a2959c7ecda7a6bba643a8852339028deab4ded"}
Mar 20 15:48:45 crc kubenswrapper[3552]: I0320 15:48:45.305742 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6f9cbcd486-gj8tz" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused"
Mar 20 15:48:45 crc kubenswrapper[3552]: I0320 15:48:45.462746 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f102abe-1535-4233-ba10-e6ce2e4daa29" path="/var/lib/kubelet/pods/8f102abe-1535-4233-ba10-e6ce2e4daa29/volumes"
Mar 20 15:48:46 crc kubenswrapper[3552]: I0320 15:48:46.383225 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c","Type":"ContainerStarted","Data":"14f01b5fc656ae3fa74fae3a1b5fa95a7169b032a4fc12847b3c55cddafa1444"}
Mar 20 15:48:46 crc kubenswrapper[3552]: I0320 15:48:46.383551 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"5cbde9b3-0a58-4706-bb8c-5a8694ddb51c","Type":"ContainerStarted","Data":"08c0fa2814fafda365db69078206b5c2b7f92fa67318378ee6e7033522c9fdc0"}
Mar 20 15:48:46 crc kubenswrapper[3552]: I0320 15:48:46.385635 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Mar 20 15:48:46 crc kubenswrapper[3552]: I0320 15:48:46.387257 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="5cbde9b3-0a58-4706-bb8c-5a8694ddb51c" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.172:9322/\": dial tcp 10.217.0.172:9322: connect: connection refused"
Mar 20 15:48:46 crc kubenswrapper[3552]: I0320 15:48:46.390477 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerStarted","Data":"f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d"}
Mar 20 15:48:46 crc kubenswrapper[3552]: I0320 15:48:46.411021 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.410972859 podStartE2EDuration="3.410972859s" podCreationTimestamp="2026-03-20 15:48:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:48:46.407721842 +0000 UTC m=+1426.101418682" watchObservedRunningTime="2026-03-20 15:48:46.410972859 +0000 UTC m=+1426.104669689"
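The watcher-api readiness probe fails with connection refused at 15:48:46 and the pod flips to ready a few seconds later, the normal race between an HTTPS probe and a listener that is still binding. A sketch of a probe definition consistent with the logged Get against https://10.217.0.172:9322/; the path, scheme, and port come from the log, while the timing fields are assumptions:

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    // Probe consistent with the logged failure:
    //   Get "https://10.217.0.172:9322/": connect: connection refused
    var watcherAPIReadiness = &corev1.Probe{
        ProbeHandler: corev1.ProbeHandler{
            HTTPGet: &corev1.HTTPGetAction{
                Path:   "/",                   // path from the logged URL
                Port:   intstr.FromInt(9322),  // port from the logged URL
                Scheme: corev1.URISchemeHTTPS, // scheme from the logged URL
            },
        },
        PeriodSeconds:    3, // assumed; the log shows the pod turning ready shortly after
        FailureThreshold: 3, // assumed
    }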
m=+1426.101418682" watchObservedRunningTime="2026-03-20 15:48:46.410972859 +0000 UTC m=+1426.104669689" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.407813 3552 generic.go:334] "Generic (PLEG): container finished" podID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerID="bc3946586488c4ab10f05e60c67783bcc86ad32da7f7f8b7588c25f3b5bea593" exitCode=137 Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.408060 3552 generic.go:334] "Generic (PLEG): container finished" podID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerID="d7f89164ec91d06a70408407dbd90834709af88ed7e48495c533c51fc148c4d1" exitCode=137 Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.408101 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c585b5b7c-lrxqx" event={"ID":"7c90e274-fdb6-4e4a-a3af-1b48027f429e","Type":"ContainerDied","Data":"bc3946586488c4ab10f05e60c67783bcc86ad32da7f7f8b7588c25f3b5bea593"} Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.408121 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c585b5b7c-lrxqx" event={"ID":"7c90e274-fdb6-4e4a-a3af-1b48027f429e","Type":"ContainerDied","Data":"d7f89164ec91d06a70408407dbd90834709af88ed7e48495c533c51fc148c4d1"} Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.411448 3552 generic.go:334] "Generic (PLEG): container finished" podID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerID="188de2be36b062b71fd3a820aaf340a68057bb9695648564affc73b6b662a719" exitCode=137 Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.411471 3552 generic.go:334] "Generic (PLEG): container finished" podID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerID="d01d19fcbc1a03d2f56b9a99db7bda241210e995facfe5e1e8929b12d511fb2c" exitCode=137 Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.411516 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fddf585b5-c58zk" event={"ID":"30916098-d2f2-40d6-b7e8-e3c784f7169b","Type":"ContainerDied","Data":"188de2be36b062b71fd3a820aaf340a68057bb9695648564affc73b6b662a719"} Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.411536 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fddf585b5-c58zk" event={"ID":"30916098-d2f2-40d6-b7e8-e3c784f7169b","Type":"ContainerDied","Data":"d01d19fcbc1a03d2f56b9a99db7bda241210e995facfe5e1e8929b12d511fb2c"} Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.420790 3552 generic.go:334] "Generic (PLEG): container finished" podID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerID="cf8852c52014a54cbdb5e746421b4f3503ddcbe1740912196f825aea876b1e3b" exitCode=137 Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.420828 3552 generic.go:334] "Generic (PLEG): container finished" podID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerID="6c1e1e01ba6adee7fbd18035f095d777ff3a61806c29e3571d2b3d0b51f1da12" exitCode=137 Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.422274 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5cf7d98dbf-jv84z" event={"ID":"451d55ac-e9ef-427a-aba6-b85a62a08e59","Type":"ContainerDied","Data":"cf8852c52014a54cbdb5e746421b4f3503ddcbe1740912196f825aea876b1e3b"} Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.422306 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5cf7d98dbf-jv84z" event={"ID":"451d55ac-e9ef-427a-aba6-b85a62a08e59","Type":"ContainerDied","Data":"6c1e1e01ba6adee7fbd18035f095d777ff3a61806c29e3571d2b3d0b51f1da12"} Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.809685 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/451d55ac-e9ef-427a-aba6-b85a62a08e59-horizon-secret-key\") pod \"451d55ac-e9ef-427a-aba6-b85a62a08e59\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") "
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.809762 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451d55ac-e9ef-427a-aba6-b85a62a08e59-logs\") pod \"451d55ac-e9ef-427a-aba6-b85a62a08e59\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") "
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.809826 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-scripts\") pod \"451d55ac-e9ef-427a-aba6-b85a62a08e59\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") "
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.809870 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g662\" (UniqueName: \"kubernetes.io/projected/451d55ac-e9ef-427a-aba6-b85a62a08e59-kube-api-access-9g662\") pod \"451d55ac-e9ef-427a-aba6-b85a62a08e59\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") "
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.809960 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-config-data\") pod \"451d55ac-e9ef-427a-aba6-b85a62a08e59\" (UID: \"451d55ac-e9ef-427a-aba6-b85a62a08e59\") "
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.811541 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/451d55ac-e9ef-427a-aba6-b85a62a08e59-logs" (OuterVolumeSpecName: "logs") pod "451d55ac-e9ef-427a-aba6-b85a62a08e59" (UID: "451d55ac-e9ef-427a-aba6-b85a62a08e59"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.818951 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/451d55ac-e9ef-427a-aba6-b85a62a08e59-kube-api-access-9g662" (OuterVolumeSpecName: "kube-api-access-9g662") pod "451d55ac-e9ef-427a-aba6-b85a62a08e59" (UID: "451d55ac-e9ef-427a-aba6-b85a62a08e59"). InnerVolumeSpecName "kube-api-access-9g662". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.830760 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/451d55ac-e9ef-427a-aba6-b85a62a08e59-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "451d55ac-e9ef-427a-aba6-b85a62a08e59" (UID: "451d55ac-e9ef-427a-aba6-b85a62a08e59"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.836039 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-config-data" (OuterVolumeSpecName: "config-data") pod "451d55ac-e9ef-427a-aba6-b85a62a08e59" (UID: "451d55ac-e9ef-427a-aba6-b85a62a08e59"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.848805 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-scripts" (OuterVolumeSpecName: "scripts") pod "451d55ac-e9ef-427a-aba6-b85a62a08e59" (UID: "451d55ac-e9ef-427a-aba6-b85a62a08e59"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.912567 3552 reconciler_common.go:300] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/451d55ac-e9ef-427a-aba6-b85a62a08e59-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.912598 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451d55ac-e9ef-427a-aba6-b85a62a08e59-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.912608 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.912618 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-9g662\" (UniqueName: \"kubernetes.io/projected/451d55ac-e9ef-427a-aba6-b85a62a08e59-kube-api-access-9g662\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:47 crc kubenswrapper[3552]: I0320 15:48:47.912627 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/451d55ac-e9ef-427a-aba6-b85a62a08e59-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.160542 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fddf585b5-c58zk" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.204472 3552 util.go:48] "No ready sandbox for pod can be found. 
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.217931 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-scripts\") pod \"30916098-d2f2-40d6-b7e8-e3c784f7169b\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") "
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.218027 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30916098-d2f2-40d6-b7e8-e3c784f7169b-logs\") pod \"30916098-d2f2-40d6-b7e8-e3c784f7169b\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") "
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.218158 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq6fj\" (UniqueName: \"kubernetes.io/projected/30916098-d2f2-40d6-b7e8-e3c784f7169b-kube-api-access-wq6fj\") pod \"30916098-d2f2-40d6-b7e8-e3c784f7169b\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") "
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.218259 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-config-data\") pod \"30916098-d2f2-40d6-b7e8-e3c784f7169b\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") "
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.218306 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/30916098-d2f2-40d6-b7e8-e3c784f7169b-horizon-secret-key\") pod \"30916098-d2f2-40d6-b7e8-e3c784f7169b\" (UID: \"30916098-d2f2-40d6-b7e8-e3c784f7169b\") "
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.218464 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30916098-d2f2-40d6-b7e8-e3c784f7169b-logs" (OuterVolumeSpecName: "logs") pod "30916098-d2f2-40d6-b7e8-e3c784f7169b" (UID: "30916098-d2f2-40d6-b7e8-e3c784f7169b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.218756 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30916098-d2f2-40d6-b7e8-e3c784f7169b-logs\") on node \"crc\" DevicePath \"\""
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.225952 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30916098-d2f2-40d6-b7e8-e3c784f7169b-kube-api-access-wq6fj" (OuterVolumeSpecName: "kube-api-access-wq6fj") pod "30916098-d2f2-40d6-b7e8-e3c784f7169b" (UID: "30916098-d2f2-40d6-b7e8-e3c784f7169b"). InnerVolumeSpecName "kube-api-access-wq6fj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.229588 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30916098-d2f2-40d6-b7e8-e3c784f7169b-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "30916098-d2f2-40d6-b7e8-e3c784f7169b" (UID: "30916098-d2f2-40d6-b7e8-e3c784f7169b"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.252771 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-config-data" (OuterVolumeSpecName: "config-data") pod "30916098-d2f2-40d6-b7e8-e3c784f7169b" (UID: "30916098-d2f2-40d6-b7e8-e3c784f7169b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.254553 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-scripts" (OuterVolumeSpecName: "scripts") pod "30916098-d2f2-40d6-b7e8-e3c784f7169b" (UID: "30916098-d2f2-40d6-b7e8-e3c784f7169b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.320367 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c90e274-fdb6-4e4a-a3af-1b48027f429e-logs\") pod \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.320487 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsnwr\" (UniqueName: \"kubernetes.io/projected/7c90e274-fdb6-4e4a-a3af-1b48027f429e-kube-api-access-zsnwr\") pod \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.320548 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-scripts\") pod \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.320589 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-config-data\") pod \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.320624 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7c90e274-fdb6-4e4a-a3af-1b48027f429e-horizon-secret-key\") pod \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\" (UID: \"7c90e274-fdb6-4e4a-a3af-1b48027f429e\") " Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.320725 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c90e274-fdb6-4e4a-a3af-1b48027f429e-logs" (OuterVolumeSpecName: "logs") pod "7c90e274-fdb6-4e4a-a3af-1b48027f429e" (UID: "7c90e274-fdb6-4e4a-a3af-1b48027f429e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.321160 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wq6fj\" (UniqueName: \"kubernetes.io/projected/30916098-d2f2-40d6-b7e8-e3c784f7169b-kube-api-access-wq6fj\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.321189 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.321219 3552 reconciler_common.go:300] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/30916098-d2f2-40d6-b7e8-e3c784f7169b-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.321232 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c90e274-fdb6-4e4a-a3af-1b48027f429e-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.321244 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30916098-d2f2-40d6-b7e8-e3c784f7169b-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.324139 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c90e274-fdb6-4e4a-a3af-1b48027f429e-kube-api-access-zsnwr" (OuterVolumeSpecName: "kube-api-access-zsnwr") pod "7c90e274-fdb6-4e4a-a3af-1b48027f429e" (UID: "7c90e274-fdb6-4e4a-a3af-1b48027f429e"). InnerVolumeSpecName "kube-api-access-zsnwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.329505 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c90e274-fdb6-4e4a-a3af-1b48027f429e-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7c90e274-fdb6-4e4a-a3af-1b48027f429e" (UID: "7c90e274-fdb6-4e4a-a3af-1b48027f429e"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.344280 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-config-data" (OuterVolumeSpecName: "config-data") pod "7c90e274-fdb6-4e4a-a3af-1b48027f429e" (UID: "7c90e274-fdb6-4e4a-a3af-1b48027f429e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.346669 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-scripts" (OuterVolumeSpecName: "scripts") pod "7c90e274-fdb6-4e4a-a3af-1b48027f429e" (UID: "7c90e274-fdb6-4e4a-a3af-1b48027f429e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.423142 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.423455 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c90e274-fdb6-4e4a-a3af-1b48027f429e-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.423473 3552 reconciler_common.go:300] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7c90e274-fdb6-4e4a-a3af-1b48027f429e-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.423489 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zsnwr\" (UniqueName: \"kubernetes.io/projected/7c90e274-fdb6-4e4a-a3af-1b48027f429e-kube-api-access-zsnwr\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.430119 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5cf7d98dbf-jv84z" event={"ID":"451d55ac-e9ef-427a-aba6-b85a62a08e59","Type":"ContainerDied","Data":"a174f6d54970fce0c9d2e5555288841d2b162fe95feb6484c7a348ae2723a42f"} Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.430156 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5cf7d98dbf-jv84z" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.430170 3552 scope.go:117] "RemoveContainer" containerID="cf8852c52014a54cbdb5e746421b4f3503ddcbe1740912196f825aea876b1e3b" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.432765 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c585b5b7c-lrxqx" event={"ID":"7c90e274-fdb6-4e4a-a3af-1b48027f429e","Type":"ContainerDied","Data":"911891d394d12c2444c6ad6bef03c620ed7594199b83cc5adb486de0aa7fd313"} Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.432794 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c585b5b7c-lrxqx" Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.437166 3552 util.go:48] "No ready sandbox for pod can be found. 
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.437924 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fddf585b5-c58zk" event={"ID":"30916098-d2f2-40d6-b7e8-e3c784f7169b","Type":"ContainerDied","Data":"41b21e3d83869d74023a36290c7ba426597e60daa17e8a73d6169fa9517d904d"}
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.551009 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fddf585b5-c58zk"]
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.564035 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fddf585b5-c58zk"]
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.571794 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5cf7d98dbf-jv84z"]
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.578451 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5cf7d98dbf-jv84z"]
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.585948 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c585b5b7c-lrxqx"]
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.599187 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-c585b5b7c-lrxqx"]
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.682083 3552 scope.go:117] "RemoveContainer" containerID="6c1e1e01ba6adee7fbd18035f095d777ff3a61806c29e3571d2b3d0b51f1da12"
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.713948 3552 scope.go:117] "RemoveContainer" containerID="bc3946586488c4ab10f05e60c67783bcc86ad32da7f7f8b7588c25f3b5bea593"
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.796537 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-9c7459748-mvczs"
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.838534 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.928609 3552 scope.go:117] "RemoveContainer" containerID="d7f89164ec91d06a70408407dbd90834709af88ed7e48495c533c51fc148c4d1"
Mar 20 15:48:48 crc kubenswrapper[3552]: I0320 15:48:48.951567 3552 scope.go:117] "RemoveContainer" containerID="188de2be36b062b71fd3a820aaf340a68057bb9695648564affc73b6b662a719"
Mar 20 15:48:49 crc kubenswrapper[3552]: I0320 15:48:49.144141 3552 scope.go:117] "RemoveContainer" containerID="d01d19fcbc1a03d2f56b9a99db7bda241210e995facfe5e1e8929b12d511fb2c"
Mar 20 15:48:49 crc kubenswrapper[3552]: I0320 15:48:49.439176 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" path="/var/lib/kubelet/pods/30916098-d2f2-40d6-b7e8-e3c784f7169b/volumes"
Mar 20 15:48:49 crc kubenswrapper[3552]: I0320 15:48:49.440075 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" path="/var/lib/kubelet/pods/451d55ac-e9ef-427a-aba6-b85a62a08e59/volumes"
Mar 20 15:48:49 crc kubenswrapper[3552]: I0320 15:48:49.440704 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" path="/var/lib/kubelet/pods/7c90e274-fdb6-4e4a-a3af-1b48027f429e/volumes"
Mar 20 15:48:49 crc kubenswrapper[3552]: I0320 15:48:49.445752 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Mar 20 15:48:49 crc kubenswrapper[3552]: I0320 15:48:49.996906 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
(probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.459613 3552 generic.go:334] "Generic (PLEG): container finished" podID="8ae12b08-a678-4f33-8b7c-d23c6aca08fe" containerID="f8494ea855028bd40e206f89694c62395409d373aa19726fdd6e6f3ee407ab65" exitCode=0 Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.459937 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vbvt2" event={"ID":"8ae12b08-a678-4f33-8b7c-d23c6aca08fe","Type":"ContainerDied","Data":"f8494ea855028bd40e206f89694c62395409d373aa19726fdd6e6f3ee407ab65"} Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.607029 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-9c7459748-mvczs" Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.676450 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6f9cbcd486-gj8tz"] Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.676667 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-6f9cbcd486-gj8tz" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon-log" containerID="cri-o://3f4231a6bdea50fa6b04a73dff18ca3f8de0f2b3d05f0819f7ee3850852822ca" gracePeriod=30 Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.677256 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/horizon-6f9cbcd486-gj8tz" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon" containerID="cri-o://75c1164319928057684acf21c1fff47d01d37d6c03dc88c0b8893dd430f2ca09" gracePeriod=30 Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.684022 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-66bd8f794-2dbwx" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.684243 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-66bd8f794-2dbwx" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Mar 20 15:48:50 crc kubenswrapper[3552]: I0320 15:48:50.686822 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-66bd8f794-2dbwx" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503" Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.474195 3552 generic.go:334] "Generic (PLEG): container finished" podID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerID="75c1164319928057684acf21c1fff47d01d37d6c03dc88c0b8893dd430f2ca09" exitCode=0 Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.474263 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f9cbcd486-gj8tz" event={"ID":"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a","Type":"ContainerDied","Data":"75c1164319928057684acf21c1fff47d01d37d6c03dc88c0b8893dd430f2ca09"} Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.921878 3552 util.go:48] "No ready sandbox for pod can be found. 
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.990834 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-config-data\") pod \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") "
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.990911 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-combined-ca-bundle\") pod \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") "
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.991023 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hkmh\" (UniqueName: \"kubernetes.io/projected/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-kube-api-access-4hkmh\") pod \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") "
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.991182 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-logs\") pod \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") "
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.991225 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-scripts\") pod \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\" (UID: \"8ae12b08-a678-4f33-8b7c-d23c6aca08fe\") "
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.991981 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-logs" (OuterVolumeSpecName: "logs") pod "8ae12b08-a678-4f33-8b7c-d23c6aca08fe" (UID: "8ae12b08-a678-4f33-8b7c-d23c6aca08fe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.997708 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-scripts" (OuterVolumeSpecName: "scripts") pod "8ae12b08-a678-4f33-8b7c-d23c6aca08fe" (UID: "8ae12b08-a678-4f33-8b7c-d23c6aca08fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:48:51 crc kubenswrapper[3552]: I0320 15:48:51.997882 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-kube-api-access-4hkmh" (OuterVolumeSpecName: "kube-api-access-4hkmh") pod "8ae12b08-a678-4f33-8b7c-d23c6aca08fe" (UID: "8ae12b08-a678-4f33-8b7c-d23c6aca08fe"). InnerVolumeSpecName "kube-api-access-4hkmh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.020604 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ae12b08-a678-4f33-8b7c-d23c6aca08fe" (UID: "8ae12b08-a678-4f33-8b7c-d23c6aca08fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.029040 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-config-data" (OuterVolumeSpecName: "config-data") pod "8ae12b08-a678-4f33-8b7c-d23c6aca08fe" (UID: "8ae12b08-a678-4f33-8b7c-d23c6aca08fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.093742 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.093783 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-4hkmh\" (UniqueName: \"kubernetes.io/projected/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-kube-api-access-4hkmh\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.093801 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.093812 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.093824 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ae12b08-a678-4f33-8b7c-d23c6aca08fe-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.484451 3552 generic.go:334] "Generic (PLEG): container finished" podID="739e2036-5958-4ee3-9fe3-4734696fdc6a" containerID="60fd76bb1637a9197587ef589c085e368feed4145a6273d9e562f9fcd4d7a941" exitCode=0 Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.484635 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z758k" event={"ID":"739e2036-5958-4ee3-9fe3-4734696fdc6a","Type":"ContainerDied","Data":"60fd76bb1637a9197587ef589c085e368feed4145a6273d9e562f9fcd4d7a941"} Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.488187 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vbvt2" event={"ID":"8ae12b08-a678-4f33-8b7c-d23c6aca08fe","Type":"ContainerDied","Data":"de3e903bb96db6d557a3fe8546a98dcb9c755376204dd7da8791955b5d310bfe"} Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.488225 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de3e903bb96db6d557a3fe8546a98dcb9c755376204dd7da8791955b5d310bfe" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.488305 3552 util.go:48] "No ready sandbox for pod can be found. 
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.616555 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/placement-6c5fc7598d-gk2l6"]
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.616734 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5410b014-55f4-4359-98fd-a7e4bca67721" podNamespace="openstack" podName="placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618151 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618170 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618194 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618204 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618220 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618227 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618248 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618257 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618267 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8ae12b08-a678-4f33-8b7c-d23c6aca08fe" containerName="placement-db-sync"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618274 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ae12b08-a678-4f33-8b7c-d23c6aca08fe" containerName="placement-db-sync"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618289 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618298 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: E0320 15:48:52.618308 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618314 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618614 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618634 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="30916098-d2f2-40d6-b7e8-e3c784f7169b" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618644 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618654 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618668 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c90e274-fdb6-4e4a-a3af-1b48027f429e" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618684 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="451d55ac-e9ef-427a-aba6-b85a62a08e59" containerName="horizon-log"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.618695 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ae12b08-a678-4f33-8b7c-d23c6aca08fe" containerName="placement-db-sync"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.619598 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.622816 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.623265 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.623422 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-98wcw"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.626331 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.626787 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.632988 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6c5fc7598d-gk2l6"]
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.705443 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pln8l\" (UniqueName: \"kubernetes.io/projected/5410b014-55f4-4359-98fd-a7e4bca67721-kube-api-access-pln8l\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.705558 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-config-data\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.705598 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-internal-tls-certs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.705636 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5410b014-55f4-4359-98fd-a7e4bca67721-logs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.705655 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-scripts\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.705678 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-public-tls-certs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.706014 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-combined-ca-bundle\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.792431 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-bb7976659-5s6lm"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.803878 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-bb7976659-5s6lm" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.803879 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-bb7976659-5s6lm" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807525 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pln8l\" (UniqueName: \"kubernetes.io/projected/5410b014-55f4-4359-98fd-a7e4bca67721-kube-api-access-pln8l\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807660 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-config-data\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807742 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-internal-tls-certs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807831 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5410b014-55f4-4359-98fd-a7e4bca67721-logs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6"
\"kubernetes.io/empty-dir/5410b014-55f4-4359-98fd-a7e4bca67721-logs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807861 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-scripts\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807893 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-public-tls-certs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.807958 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-combined-ca-bundle\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.808732 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-bb7976659-5s6lm" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.809192 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5410b014-55f4-4359-98fd-a7e4bca67721-logs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.814351 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-scripts\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.815059 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-internal-tls-certs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.815076 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-config-data\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.815587 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-bb7976659-5s6lm" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.817570 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for 
volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-public-tls-certs\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.831276 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5410b014-55f4-4359-98fd-a7e4bca67721-combined-ca-bundle\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.834225 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pln8l\" (UniqueName: \"kubernetes.io/projected/5410b014-55f4-4359-98fd-a7e4bca67721-kube-api-access-pln8l\") pod \"placement-6c5fc7598d-gk2l6\" (UID: \"5410b014-55f4-4359-98fd-a7e4bca67721\") " pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:52 crc kubenswrapper[3552]: I0320 15:48:52.983842 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:48:53 crc kubenswrapper[3552]: I0320 15:48:53.837834 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Mar 20 15:48:53 crc kubenswrapper[3552]: I0320 15:48:53.845610 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.514110 3552 generic.go:334] "Generic (PLEG): container finished" podID="625d76ad-7531-4be1-ab6d-769f15e6a7e5" containerID="8ae763f3fbcc142bbe20ff962a7dae27043bae90a1b8f2a09bb3e5af65f18c80" exitCode=0 Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.514731 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dncfs" event={"ID":"625d76ad-7531-4be1-ab6d-769f15e6a7e5","Type":"ContainerDied","Data":"8ae763f3fbcc142bbe20ff962a7dae27043bae90a1b8f2a09bb3e5af65f18c80"} Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.532832 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.630213 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2bkk6"] Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.630369 3552 topology_manager.go:215] "Topology Admit Handler" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" podNamespace="openshift-marketplace" podName="redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.631976 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.647120 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-catalog-content\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.647525 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gfxt\" (UniqueName: \"kubernetes.io/projected/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-kube-api-access-7gfxt\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.647568 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-utilities\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.660437 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2bkk6"] Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.749818 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-catalog-content\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.750062 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-7gfxt\" (UniqueName: \"kubernetes.io/projected/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-kube-api-access-7gfxt\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.750140 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-utilities\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.750778 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-utilities\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.751937 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-catalog-content\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.775420 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7gfxt\" (UniqueName: \"kubernetes.io/projected/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-kube-api-access-7gfxt\") pod \"redhat-operators-2bkk6\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") " pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:54 crc kubenswrapper[3552]: I0320 15:48:54.948964 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.153363 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z758k" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.257947 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-db-sync-config-data\") pod \"739e2036-5958-4ee3-9fe3-4734696fdc6a\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.258013 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-combined-ca-bundle\") pod \"739e2036-5958-4ee3-9fe3-4734696fdc6a\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.258171 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz2tf\" (UniqueName: \"kubernetes.io/projected/739e2036-5958-4ee3-9fe3-4734696fdc6a-kube-api-access-lz2tf\") pod \"739e2036-5958-4ee3-9fe3-4734696fdc6a\" (UID: \"739e2036-5958-4ee3-9fe3-4734696fdc6a\") " Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.261752 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "739e2036-5958-4ee3-9fe3-4734696fdc6a" (UID: "739e2036-5958-4ee3-9fe3-4734696fdc6a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.263481 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/739e2036-5958-4ee3-9fe3-4734696fdc6a-kube-api-access-lz2tf" (OuterVolumeSpecName: "kube-api-access-lz2tf") pod "739e2036-5958-4ee3-9fe3-4734696fdc6a" (UID: "739e2036-5958-4ee3-9fe3-4734696fdc6a"). InnerVolumeSpecName "kube-api-access-lz2tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.284516 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "739e2036-5958-4ee3-9fe3-4734696fdc6a" (UID: "739e2036-5958-4ee3-9fe3-4734696fdc6a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.360614 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-lz2tf\" (UniqueName: \"kubernetes.io/projected/739e2036-5958-4ee3-9fe3-4734696fdc6a-kube-api-access-lz2tf\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.360657 3552 reconciler_common.go:300] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.360672 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/739e2036-5958-4ee3-9fe3-4734696fdc6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.526231 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z758k" event={"ID":"739e2036-5958-4ee3-9fe3-4734696fdc6a","Type":"ContainerDied","Data":"94f719e569f905261f976d1bdba0c6999965d1f963db67698e8fa12c3fb8c275"} Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.526577 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94f719e569f905261f976d1bdba0c6999965d1f963db67698e8fa12c3fb8c275" Mar 20 15:48:55 crc kubenswrapper[3552]: I0320 15:48:55.526344 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z758k" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.368634 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6bf8c5549-pmzmz"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.369083 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775" podNamespace="openstack" podName="barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: E0320 15:48:56.369311 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="739e2036-5958-4ee3-9fe3-4734696fdc6a" containerName="barbican-db-sync" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.369325 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="739e2036-5958-4ee3-9fe3-4734696fdc6a" containerName="barbican-db-sync" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.369524 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="739e2036-5958-4ee3-9fe3-4734696fdc6a" containerName="barbican-db-sync" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.370445 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.374257 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.374308 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-zxnl5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.374497 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.385767 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6bf8c5549-pmzmz"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.388629 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f25mw\" (UniqueName: \"kubernetes.io/projected/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-kube-api-access-f25mw\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.388690 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-logs\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.388729 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-config-data-custom\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.388776 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-config-data\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.388814 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-combined-ca-bundle\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.491351 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-combined-ca-bundle\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.491533 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f25mw\" (UniqueName: \"kubernetes.io/projected/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-kube-api-access-f25mw\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: 
\"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.491572 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-logs\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.491611 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-config-data-custom\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.491689 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-config-data\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.499812 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-69d7546cf6-hwprx"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.499945 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-logs\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.500023 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b5733b56-3f0c-4b1f-9075-22590d21d3b4" podNamespace="openstack" podName="barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.501999 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.514233 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.514818 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-69d7546cf6-hwprx"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.567796 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f25mw\" (UniqueName: \"kubernetes.io/projected/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-kube-api-access-f25mw\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.572540 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-config-data\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.580045 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-combined-ca-bundle\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.592225 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-config-data\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.592316 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-config-data-custom\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.592367 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-combined-ca-bundle\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.592440 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5733b56-3f0c-4b1f-9075-22590d21d3b4-logs\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.592471 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx62s\" (UniqueName: 
\"kubernetes.io/projected/b5733b56-3f0c-4b1f-9075-22590d21d3b4-kube-api-access-sx62s\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.634007 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775-config-data-custom\") pod \"barbican-worker-6bf8c5549-pmzmz\" (UID: \"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775\") " pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.664233 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b57cdffc5-qrq5m"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.664480 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a93a319c-d470-4c91-8fcc-288bb6b6119d" podNamespace="openstack" podName="dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.665916 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.690869 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b57cdffc5-qrq5m"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.697358 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5733b56-3f0c-4b1f-9075-22590d21d3b4-logs\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.697475 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sx62s\" (UniqueName: \"kubernetes.io/projected/b5733b56-3f0c-4b1f-9075-22590d21d3b4-kube-api-access-sx62s\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.697515 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-config-data\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.697575 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-config-data-custom\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.697626 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-combined-ca-bundle\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.699582 3552 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6bf8c5549-pmzmz" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.699754 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5733b56-3f0c-4b1f-9075-22590d21d3b4-logs\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.723196 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-combined-ca-bundle\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.771607 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-config-data\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.771680 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5733b56-3f0c-4b1f-9075-22590d21d3b4-config-data-custom\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.778423 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx62s\" (UniqueName: \"kubernetes.io/projected/b5733b56-3f0c-4b1f-9075-22590d21d3b4-kube-api-access-sx62s\") pod \"barbican-keystone-listener-69d7546cf6-hwprx\" (UID: \"b5733b56-3f0c-4b1f-9075-22590d21d3b4\") " pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.804152 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-646bf87fb6-8jzf5"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.804279 3552 topology_manager.go:215] "Topology Admit Handler" podUID="20f75298-10c5-4400-870b-472a9f870f0b" podNamespace="openstack" podName="barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.806798 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-nb\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.806856 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.806936 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-swift-storage-0\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.807010 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-config\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.807059 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrnjw\" (UniqueName: \"kubernetes.io/projected/a93a319c-d470-4c91-8fcc-288bb6b6119d-kube-api-access-nrnjw\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.807123 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-svc\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.807179 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-sb\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.811367 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.825772 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-646bf87fb6-8jzf5"] Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.844892 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909560 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f75298-10c5-4400-870b-472a9f870f0b-logs\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909641 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-swift-storage-0\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909662 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcltb\" (UniqueName: \"kubernetes.io/projected/20f75298-10c5-4400-870b-472a9f870f0b-kube-api-access-wcltb\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909708 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-config\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909737 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-combined-ca-bundle\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909780 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nrnjw\" (UniqueName: \"kubernetes.io/projected/a93a319c-d470-4c91-8fcc-288bb6b6119d-kube-api-access-nrnjw\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909810 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data-custom\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909835 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-svc\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909865 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-sb\") pod 
\"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909905 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.909931 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-nb\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.910800 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-nb\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.911607 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-swift-storage-0\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.916874 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-config\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.917101 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-sb\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.930063 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrnjw\" (UniqueName: \"kubernetes.io/projected/a93a319c-d470-4c91-8fcc-288bb6b6119d-kube-api-access-nrnjw\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:56 crc kubenswrapper[3552]: I0320 15:48:56.917493 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-svc\") pod \"dnsmasq-dns-7b57cdffc5-qrq5m\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.011014 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " 
pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.012326 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f75298-10c5-4400-870b-472a9f870f0b-logs\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.012477 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wcltb\" (UniqueName: \"kubernetes.io/projected/20f75298-10c5-4400-870b-472a9f870f0b-kube-api-access-wcltb\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.012635 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-combined-ca-bundle\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.012746 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data-custom\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.012859 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f75298-10c5-4400-870b-472a9f870f0b-logs\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.015866 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.016483 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-combined-ca-bundle\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.017088 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data-custom\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.031085 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcltb\" (UniqueName: \"kubernetes.io/projected/20f75298-10c5-4400-870b-472a9f870f0b-kube-api-access-wcltb\") pod \"barbican-api-646bf87fb6-8jzf5\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc 
kubenswrapper[3552]: I0320 15:48:57.120029 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.154596 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.335709 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dncfs" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522155 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw299\" (UniqueName: \"kubernetes.io/projected/625d76ad-7531-4be1-ab6d-769f15e6a7e5-kube-api-access-bw299\") pod \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522521 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/625d76ad-7531-4be1-ab6d-769f15e6a7e5-etc-machine-id\") pod \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522574 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/625d76ad-7531-4be1-ab6d-769f15e6a7e5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "625d76ad-7531-4be1-ab6d-769f15e6a7e5" (UID: "625d76ad-7531-4be1-ab6d-769f15e6a7e5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522719 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-combined-ca-bundle\") pod \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522789 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-config-data\") pod \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522848 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-db-sync-config-data\") pod \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.522883 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-scripts\") pod \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\" (UID: \"625d76ad-7531-4be1-ab6d-769f15e6a7e5\") " Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.523391 3552 reconciler_common.go:300] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/625d76ad-7531-4be1-ab6d-769f15e6a7e5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.532564 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/625d76ad-7531-4be1-ab6d-769f15e6a7e5-kube-api-access-bw299" (OuterVolumeSpecName: "kube-api-access-bw299") pod "625d76ad-7531-4be1-ab6d-769f15e6a7e5" (UID: "625d76ad-7531-4be1-ab6d-769f15e6a7e5"). InnerVolumeSpecName "kube-api-access-bw299". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.532579 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "625d76ad-7531-4be1-ab6d-769f15e6a7e5" (UID: "625d76ad-7531-4be1-ab6d-769f15e6a7e5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.545169 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-scripts" (OuterVolumeSpecName: "scripts") pod "625d76ad-7531-4be1-ab6d-769f15e6a7e5" (UID: "625d76ad-7531-4be1-ab6d-769f15e6a7e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.573004 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dncfs" event={"ID":"625d76ad-7531-4be1-ab6d-769f15e6a7e5","Type":"ContainerDied","Data":"bdb0ad4c71a27c27082f840e51fe132d70f96d4eb123c67324bb8b46cff0ca8c"} Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.573029 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dncfs" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.573044 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bdb0ad4c71a27c27082f840e51fe132d70f96d4eb123c67324bb8b46cff0ca8c" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.586957 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "625d76ad-7531-4be1-ab6d-769f15e6a7e5" (UID: "625d76ad-7531-4be1-ab6d-769f15e6a7e5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.625014 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-bw299\" (UniqueName: \"kubernetes.io/projected/625d76ad-7531-4be1-ab6d-769f15e6a7e5-kube-api-access-bw299\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.625080 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.625095 3552 reconciler_common.go:300] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.625170 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.626082 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-config-data" (OuterVolumeSpecName: "config-data") pod "625d76ad-7531-4be1-ab6d-769f15e6a7e5" (UID: "625d76ad-7531-4be1-ab6d-769f15e6a7e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:48:57 crc kubenswrapper[3552]: I0320 15:48:57.740695 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625d76ad-7531-4be1-ab6d-769f15e6a7e5-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.029977 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-69d7546cf6-hwprx"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.272483 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6c5fc7598d-gk2l6"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.282325 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2bkk6"] Mar 20 15:48:58 crc kubenswrapper[3552]: W0320 15:48:58.304383 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20f75298_10c5_4400_870b_472a9f870f0b.slice/crio-aeea39515df71ede20717ff24d3b369b8c2cb67df9909630a08148a07cabdc9a WatchSource:0}: Error finding container aeea39515df71ede20717ff24d3b369b8c2cb67df9909630a08148a07cabdc9a: Status 404 returned error can't find the container with id aeea39515df71ede20717ff24d3b369b8c2cb67df9909630a08148a07cabdc9a Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.304746 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-646bf87fb6-8jzf5"] Mar 20 15:48:58 crc kubenswrapper[3552]: W0320 15:48:58.469761 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d8b0bf5_dba9_46ad_9ec9_c95f4f5dc775.slice/crio-bede58cbedf8bb5e53ece8a8790b6ab92418a4d2304cec7767eaa4a80aac5572 WatchSource:0}: Error finding container bede58cbedf8bb5e53ece8a8790b6ab92418a4d2304cec7767eaa4a80aac5572: Status 404 returned error can't find the container with id 
bede58cbedf8bb5e53ece8a8790b6ab92418a4d2304cec7767eaa4a80aac5572
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.470253 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6bf8c5549-pmzmz"]
Mar 20 15:48:58 crc kubenswrapper[3552]: W0320 15:48:58.477668 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda93a319c_d470_4c91_8fcc_288bb6b6119d.slice/crio-49c7b7b03c8530ab601837cf3bcae1977f9dd034fae58ff57e7ea82a701efae8 WatchSource:0}: Error finding container 49c7b7b03c8530ab601837cf3bcae1977f9dd034fae58ff57e7ea82a701efae8: Status 404 returned error can't find the container with id 49c7b7b03c8530ab601837cf3bcae1977f9dd034fae58ff57e7ea82a701efae8
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.502056 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b57cdffc5-qrq5m"]
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.585814 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.585984 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" podNamespace="openstack" podName="cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: E0320 15:48:58.586247 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="625d76ad-7531-4be1-ab6d-769f15e6a7e5" containerName="cinder-db-sync"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.586259 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="625d76ad-7531-4be1-ab6d-769f15e6a7e5" containerName="cinder-db-sync"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.586491 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="625d76ad-7531-4be1-ab6d-769f15e6a7e5" containerName="cinder-db-sync"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.601685 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.604813 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.606374 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.606628 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.606745 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.620905 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-w2wb9"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.641014 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerStarted","Data":"f8fb98b9156b1b93e9e665053f1979871b60f954e28d0cb78069a9791cce7774"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.667672 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" event={"ID":"a93a319c-d470-4c91-8fcc-288bb6b6119d","Type":"ContainerStarted","Data":"49c7b7b03c8530ab601837cf3bcae1977f9dd034fae58ff57e7ea82a701efae8"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.688216 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerStarted","Data":"6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.688612 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-central-agent" containerID="cri-o://48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728" gracePeriod=30
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.688827 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.689076 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="proxy-httpd" containerID="cri-o://6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa" gracePeriod=30
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.689118 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="sg-core" containerID="cri-o://f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d" gracePeriod=30
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.689158 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-notification-agent" containerID="cri-o://2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791" gracePeriod=30
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.718290 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c5fc7598d-gk2l6" event={"ID":"5410b014-55f4-4359-98fd-a7e4bca67721","Type":"ContainerStarted","Data":"3d0701f08d4c161a3dcd2dffc4f39dd4b9e0556b60922707dd58bf4d6c78c843"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.719675 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-646bf87fb6-8jzf5" event={"ID":"20f75298-10c5-4400-870b-472a9f870f0b","Type":"ContainerStarted","Data":"aeea39515df71ede20717ff24d3b369b8c2cb67df9909630a08148a07cabdc9a"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.729751 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6bf8c5549-pmzmz" event={"ID":"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775","Type":"ContainerStarted","Data":"bede58cbedf8bb5e53ece8a8790b6ab92418a4d2304cec7767eaa4a80aac5572"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.769619 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" event={"ID":"b5733b56-3f0c-4b1f-9075-22590d21d3b4","Type":"ContainerStarted","Data":"ed179da16cc3f1ce87b00ff7220ef6bb588a4a0cb3fb3534414a620e86e6bb88"}
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.789859 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.789915 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad03da73-fd35-414a-b640-6dc7bcc40c24-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.789944 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m2td\" (UniqueName: \"kubernetes.io/projected/ad03da73-fd35-414a-b640-6dc7bcc40c24-kube-api-access-7m2td\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.789964 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.790025 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-scripts\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0"
Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.790072 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0"
"Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.381015498 podStartE2EDuration="1m23.794096724s" podCreationTimestamp="2026-03-20 15:47:35 +0000 UTC" firstStartedPulling="2026-03-20 15:47:39.003085184 +0000 UTC m=+1358.696782004" lastFinishedPulling="2026-03-20 15:48:57.4161664 +0000 UTC m=+1437.109863230" observedRunningTime="2026-03-20 15:48:58.737214615 +0000 UTC m=+1438.430911465" watchObservedRunningTime="2026-03-20 15:48:58.794096724 +0000 UTC m=+1438.487793554" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.802742 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b57cdffc5-qrq5m"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.833470 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c899bf5cf-7lhp5"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.833680 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" podNamespace="openstack" podName="dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.835033 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.845209 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c899bf5cf-7lhp5"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916048 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-7m2td\" (UniqueName: \"kubernetes.io/projected/ad03da73-fd35-414a-b640-6dc7bcc40c24-kube-api-access-7m2td\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916129 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916201 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-nb\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916238 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-svc\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916274 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-swift-storage-0\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916321 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-config\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916358 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-scripts\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916440 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zxnd\" (UniqueName: \"kubernetes.io/projected/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-kube-api-access-6zxnd\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916494 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916537 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-sb\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916586 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916636 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad03da73-fd35-414a-b640-6dc7bcc40c24-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.916800 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad03da73-fd35-414a-b640-6dc7bcc40c24-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.922070 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.922292 3552 topology_manager.go:215] "Topology Admit Handler" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" podNamespace="openstack" podName="cinder-api-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.924856 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.933364 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-scripts\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.941311 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.954912 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.962490 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.968802 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.975005 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:58 crc kubenswrapper[3552]: I0320 15:48:58.982288 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m2td\" (UniqueName: \"kubernetes.io/projected/ad03da73-fd35-414a-b640-6dc7bcc40c24-kube-api-access-7m2td\") pod \"cinder-scheduler-0\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " pod="openstack/cinder-scheduler-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020300 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-scripts\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020361 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020384 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dc4c2a50-03a3-449c-a797-9f582a546642-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-nb\") pod 
\"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020452 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f87b8\" (UniqueName: \"kubernetes.io/projected/dc4c2a50-03a3-449c-a797-9f582a546642-kube-api-access-f87b8\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020474 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-svc\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020498 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-swift-storage-0\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020528 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-config\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020569 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6zxnd\" (UniqueName: \"kubernetes.io/projected/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-kube-api-access-6zxnd\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020609 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc4c2a50-03a3-449c-a797-9f582a546642-logs\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020637 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-sb\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020778 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.020826 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data-custom\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 
20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.021640 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-swift-storage-0\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.021693 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-nb\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.022163 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-config\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.022256 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-sb\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.022783 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-svc\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.057955 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zxnd\" (UniqueName: \"kubernetes.io/projected/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-kube-api-access-6zxnd\") pod \"dnsmasq-dns-c899bf5cf-7lhp5\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124038 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-scripts\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124091 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124116 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dc4c2a50-03a3-449c-a797-9f582a546642-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124144 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-f87b8\" (UniqueName: 
\"kubernetes.io/projected/dc4c2a50-03a3-449c-a797-9f582a546642-kube-api-access-f87b8\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124228 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc4c2a50-03a3-449c-a797-9f582a546642-logs\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124261 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.124280 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data-custom\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.125535 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dc4c2a50-03a3-449c-a797-9f582a546642-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.126610 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc4c2a50-03a3-449c-a797-9f582a546642-logs\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.135285 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.154086 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-scripts\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.162599 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-f87b8\" (UniqueName: \"kubernetes.io/projected/dc4c2a50-03a3-449c-a797-9f582a546642-kube-api-access-f87b8\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.167380 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data-custom\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.170447 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.172806 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") " pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.193572 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.257080 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.827242 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c899bf5cf-7lhp5"] Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.843180 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c5fc7598d-gk2l6" event={"ID":"5410b014-55f4-4359-98fd-a7e4bca67721","Type":"ContainerStarted","Data":"d64f4a066ac92303758bf271aa98a9070e0d16483a5623afeedc9ee6e018a265"} Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.874748 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-646bf87fb6-8jzf5" event={"ID":"20f75298-10c5-4400-870b-472a9f870f0b","Type":"ContainerStarted","Data":"383291e8f72b47ee8f91223e260e8b26fff6b0bacbf3ddb6b409283fce1ea761"} Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.879443 3552 generic.go:334] "Generic (PLEG): container finished" podID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerID="79e1658cd1b2219496a780218a3274b5a548e188b7dda7130a5ef80fc1d1edd0" exitCode=0 Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.879515 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerDied","Data":"79e1658cd1b2219496a780218a3274b5a548e188b7dda7130a5ef80fc1d1edd0"} Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.920688 3552 generic.go:334] "Generic (PLEG): container finished" podID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerID="6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa" exitCode=0 Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.920715 3552 generic.go:334] "Generic (PLEG): container finished" podID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerID="f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d" exitCode=2 Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.920727 3552 generic.go:334] "Generic (PLEG): container finished" podID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerID="48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728" exitCode=0 Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.920751 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerDied","Data":"6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa"} Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.920768 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerDied","Data":"f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d"} Mar 20 15:48:59 crc kubenswrapper[3552]: I0320 15:48:59.920777 
3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerDied","Data":"48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728"} Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.142956 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.154825 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.950274 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" event={"ID":"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d","Type":"ContainerStarted","Data":"9ed0651aa5ec84a51c8358987b69ff022603d11ee13c5ac268b658ff4ebfe7d7"} Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.958930 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c5fc7598d-gk2l6" event={"ID":"5410b014-55f4-4359-98fd-a7e4bca67721","Type":"ContainerStarted","Data":"21106799d80bfcafb3783bd9e364f8f5cdcceb8b75a2116b1d52cefe50cea46c"} Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.959654 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.959698 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6c5fc7598d-gk2l6" Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.964425 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-646bf87fb6-8jzf5" event={"ID":"20f75298-10c5-4400-870b-472a9f870f0b","Type":"ContainerStarted","Data":"1a25d46c2c3847c500e9ffebda613dd5c4d882950b3daa4abbd3fb18d1e3ac5b"} Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.969701 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerStarted","Data":"fa7d1fdea4c505d6227d252bd086fcdefa3963ac1c01396e743a4ac49b423e7a"} Mar 20 15:49:00 crc kubenswrapper[3552]: I0320 15:49:00.993751 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/placement-6c5fc7598d-gk2l6" podStartSLOduration=8.993692184 podStartE2EDuration="8.993692184s" podCreationTimestamp="2026-03-20 15:48:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:00.988855664 +0000 UTC m=+1440.682552494" watchObservedRunningTime="2026-03-20 15:49:00.993692184 +0000 UTC m=+1440.687389014" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.005908 3552 generic.go:334] "Generic (PLEG): container finished" podID="a93a319c-d470-4c91-8fcc-288bb6b6119d" containerID="103cdea9d9e83bd68f03d0aabb975b5b0f8e1ab348e4ee41dea8b5028cbc85bb" exitCode=0 Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.006062 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" event={"ID":"a93a319c-d470-4c91-8fcc-288bb6b6119d","Type":"ContainerDied","Data":"103cdea9d9e83bd68f03d0aabb975b5b0f8e1ab348e4ee41dea8b5028cbc85bb"} Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.026701 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"dc4c2a50-03a3-449c-a797-9f582a546642","Type":"ContainerStarted","Data":"470bc527668e30261663928b259f44b0d2dfe0e26e6889b891db1d37852ba3f5"} Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.054615 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad03da73-fd35-414a-b640-6dc7bcc40c24","Type":"ContainerStarted","Data":"78313e4ada6a532081093799dc0fdc861f376f47241485ba6e2988a647aeea35"} Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.058115 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/barbican-api-646bf87fb6-8jzf5" podStartSLOduration=5.058053203 podStartE2EDuration="5.058053203s" podCreationTimestamp="2026-03-20 15:48:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:01.007970225 +0000 UTC m=+1440.701667055" watchObservedRunningTime="2026-03-20 15:49:01.058053203 +0000 UTC m=+1440.751750043" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.313536 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.313854 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.313926 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.313969 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.314012 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.529917 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.648350 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-nb\") pod \"a93a319c-d470-4c91-8fcc-288bb6b6119d\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.648425 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-sb\") pod \"a93a319c-d470-4c91-8fcc-288bb6b6119d\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.648477 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-config\") pod \"a93a319c-d470-4c91-8fcc-288bb6b6119d\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.648545 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrnjw\" (UniqueName: \"kubernetes.io/projected/a93a319c-d470-4c91-8fcc-288bb6b6119d-kube-api-access-nrnjw\") pod \"a93a319c-d470-4c91-8fcc-288bb6b6119d\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.648575 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-swift-storage-0\") pod \"a93a319c-d470-4c91-8fcc-288bb6b6119d\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.648632 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-svc\") pod \"a93a319c-d470-4c91-8fcc-288bb6b6119d\" (UID: \"a93a319c-d470-4c91-8fcc-288bb6b6119d\") " Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.655530 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a93a319c-d470-4c91-8fcc-288bb6b6119d-kube-api-access-nrnjw" (OuterVolumeSpecName: "kube-api-access-nrnjw") pod "a93a319c-d470-4c91-8fcc-288bb6b6119d" (UID: "a93a319c-d470-4c91-8fcc-288bb6b6119d"). InnerVolumeSpecName "kube-api-access-nrnjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.676562 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a93a319c-d470-4c91-8fcc-288bb6b6119d" (UID: "a93a319c-d470-4c91-8fcc-288bb6b6119d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.676580 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a93a319c-d470-4c91-8fcc-288bb6b6119d" (UID: "a93a319c-d470-4c91-8fcc-288bb6b6119d"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.678480 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a93a319c-d470-4c91-8fcc-288bb6b6119d" (UID: "a93a319c-d470-4c91-8fcc-288bb6b6119d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.704029 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a93a319c-d470-4c91-8fcc-288bb6b6119d" (UID: "a93a319c-d470-4c91-8fcc-288bb6b6119d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.705719 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-config" (OuterVolumeSpecName: "config") pod "a93a319c-d470-4c91-8fcc-288bb6b6119d" (UID: "a93a319c-d470-4c91-8fcc-288bb6b6119d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.754318 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.754349 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nrnjw\" (UniqueName: \"kubernetes.io/projected/a93a319c-d470-4c91-8fcc-288bb6b6119d-kube-api-access-nrnjw\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.754362 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.754372 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.754382 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:01 crc kubenswrapper[3552]: I0320 15:49:01.754391 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a93a319c-d470-4c91-8fcc-288bb6b6119d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.067383 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" event={"ID":"a93a319c-d470-4c91-8fcc-288bb6b6119d","Type":"ContainerDied","Data":"49c7b7b03c8530ab601837cf3bcae1977f9dd034fae58ff57e7ea82a701efae8"} Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.067449 3552 scope.go:117] "RemoveContainer" containerID="103cdea9d9e83bd68f03d0aabb975b5b0f8e1ab348e4ee41dea8b5028cbc85bb" Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.067380 3552 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b57cdffc5-qrq5m" Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.070928 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dc4c2a50-03a3-449c-a797-9f582a546642","Type":"ContainerStarted","Data":"faa13e4909c7f27c43f26d7faf1ff882bc40db36a83156367863990e5c4ad153"} Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.076733 3552 generic.go:334] "Generic (PLEG): container finished" podID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerID="d446b4066c75e04d83452bc4f98c7effa25d72eaa75e07e919bec5d8c6ee7c94" exitCode=0 Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.076976 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" event={"ID":"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d","Type":"ContainerDied","Data":"d446b4066c75e04d83452bc4f98c7effa25d72eaa75e07e919bec5d8c6ee7c94"} Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.087010 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" event={"ID":"b5733b56-3f0c-4b1f-9075-22590d21d3b4","Type":"ContainerStarted","Data":"b51666f68a491387765d21d31da5fd01b9265037a8ce665958d4e09432a683b8"} Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.087058 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" event={"ID":"b5733b56-3f0c-4b1f-9075-22590d21d3b4","Type":"ContainerStarted","Data":"482c1936dd3512d6a60b5a9fbf4499f2145c56c87d9ccc8171aed43ad31ff22c"} Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.087707 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.087839 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.171031 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b57cdffc5-qrq5m"] Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.182473 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b57cdffc5-qrq5m"] Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.218550 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-69d7546cf6-hwprx" podStartSLOduration=3.832907409 podStartE2EDuration="6.218497068s" podCreationTimestamp="2026-03-20 15:48:56 +0000 UTC" firstStartedPulling="2026-03-20 15:48:58.064326812 +0000 UTC m=+1437.758023642" lastFinishedPulling="2026-03-20 15:49:00.449916471 +0000 UTC m=+1440.143613301" observedRunningTime="2026-03-20 15:49:02.203809845 +0000 UTC m=+1441.897506675" watchObservedRunningTime="2026-03-20 15:49:02.218497068 +0000 UTC m=+1441.912193898" Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.562050 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.915689 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5675b7c4bb-854mw"] Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.916154 3552 topology_manager.go:215] "Topology Admit Handler" podUID="798beca9-b89f-42b2-9e24-cf98a854b880" podNamespace="openstack" podName="barbican-api-5675b7c4bb-854mw" Mar 20 15:49:02 crc kubenswrapper[3552]: E0320 15:49:02.916450 3552 
Mar 20 15:49:02 crc kubenswrapper[3552]: E0320 15:49:02.916450 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a93a319c-d470-4c91-8fcc-288bb6b6119d" containerName="init"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.916461 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a93a319c-d470-4c91-8fcc-288bb6b6119d" containerName="init"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.916642 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a93a319c-d470-4c91-8fcc-288bb6b6119d" containerName="init"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.917777 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.919565 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.919854 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.932208 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5675b7c4bb-854mw"]
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987498 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-public-tls-certs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987590 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-config-data-custom\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987612 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-combined-ca-bundle\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987633 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwrld\" (UniqueName: \"kubernetes.io/projected/798beca9-b89f-42b2-9e24-cf98a854b880-kube-api-access-cwrld\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987665 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-config-data\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987705 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-internal-tls-certs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:02 crc kubenswrapper[3552]: I0320 15:49:02.987810 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/798beca9-b89f-42b2-9e24-cf98a854b880-logs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.064724 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089057 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-config-data-custom\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089107 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-combined-ca-bundle\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089133 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-cwrld\" (UniqueName: \"kubernetes.io/projected/798beca9-b89f-42b2-9e24-cf98a854b880-kube-api-access-cwrld\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089165 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-config-data\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089203 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-internal-tls-certs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/798beca9-b89f-42b2-9e24-cf98a854b880-logs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.089305 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-public-tls-certs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.090761 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/798beca9-b89f-42b2-9e24-cf98a854b880-logs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.098318 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-internal-tls-certs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.099044 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-config-data-custom\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.099913 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-combined-ca-bundle\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.106918 3552 generic.go:334] "Generic (PLEG): container finished" podID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerID="2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791" exitCode=0
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.107605 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerDied","Data":"2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791"}
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.107656 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be47322e-8d8e-47c7-b0c6-c0de22806d81","Type":"ContainerDied","Data":"593cad958dbecccd3c4bf332b462932ff17406de70c6565c09110aa9482ebf0a"}
Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.107682 3552 scope.go:117] "RemoveContainer" containerID="6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa"
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.109029 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-public-tls-certs\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.109976 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/798beca9-b89f-42b2-9e24-cf98a854b880-config-data\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.115770 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwrld\" (UniqueName: \"kubernetes.io/projected/798beca9-b89f-42b2-9e24-cf98a854b880-kube-api-access-cwrld\") pod \"barbican-api-5675b7c4bb-854mw\" (UID: \"798beca9-b89f-42b2-9e24-cf98a854b880\") " pod="openstack/barbican-api-5675b7c4bb-854mw" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.172612 3552 scope.go:117] "RemoveContainer" containerID="f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.189928 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-combined-ca-bundle\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.190028 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgzgd\" (UniqueName: \"kubernetes.io/projected/be47322e-8d8e-47c7-b0c6-c0de22806d81-kube-api-access-zgzgd\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.190140 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-scripts\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.190226 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-run-httpd\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.190249 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-log-httpd\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.190275 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-config-data\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.190322 3552 
reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-sg-core-conf-yaml\") pod \"be47322e-8d8e-47c7-b0c6-c0de22806d81\" (UID: \"be47322e-8d8e-47c7-b0c6-c0de22806d81\") " Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.200474 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be47322e-8d8e-47c7-b0c6-c0de22806d81-kube-api-access-zgzgd" (OuterVolumeSpecName: "kube-api-access-zgzgd") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "kube-api-access-zgzgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.202606 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.203041 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.226660 3552 scope.go:117] "RemoveContainer" containerID="2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.226852 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-scripts" (OuterVolumeSpecName: "scripts") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.241478 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-8487bc87b9-4thvb" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.248692 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.280674 3552 scope.go:117] "RemoveContainer" containerID="48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.293318 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.293374 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.293384 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be47322e-8d8e-47c7-b0c6-c0de22806d81-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.293392 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.293474 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zgzgd\" (UniqueName: \"kubernetes.io/projected/be47322e-8d8e-47c7-b0c6-c0de22806d81-kube-api-access-zgzgd\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.389582 3552 scope.go:117] "RemoveContainer" containerID="6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.390048 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5675b7c4bb-854mw" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.390268 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa\": container with ID starting with 6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa not found: ID does not exist" containerID="6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.390315 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa"} err="failed to get container status \"6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa\": rpc error: code = NotFound desc = could not find container \"6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa\": container with ID starting with 6e507b0452fc136ce1072b89534271c1177f5c9b2704b9d6427baf656d3a0caa not found: ID does not exist" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.390331 3552 scope.go:117] "RemoveContainer" containerID="f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.393302 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d\": container with ID starting with f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d not found: ID does not exist" containerID="f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.393356 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d"} err="failed to get container status \"f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d\": rpc error: code = NotFound desc = could not find container \"f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d\": container with ID starting with f2058014957f287c6dcdb2f37b66a56924fb85079c86cd4b937f1fc262ca271d not found: ID does not exist" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.393372 3552 scope.go:117] "RemoveContainer" containerID="2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.396425 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791\": container with ID starting with 2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791 not found: ID does not exist" containerID="2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.396619 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791"} err="failed to get container status \"2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791\": rpc error: code = NotFound desc = could not find container \"2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791\": container with ID starting with 
2f7046f5d22ac0cb67fe1936d69d96d4c2dbfbbe941f68c7e7c6bf70765a5791 not found: ID does not exist" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.396702 3552 scope.go:117] "RemoveContainer" containerID="48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.399515 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728\": container with ID starting with 48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728 not found: ID does not exist" containerID="48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.399557 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728"} err="failed to get container status \"48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728\": rpc error: code = NotFound desc = could not find container \"48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728\": container with ID starting with 48d98707b5ae3569111bc8c74adf702b6600acd681729a49b5d89106352cd728 not found: ID does not exist" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.402572 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.417120 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-config-data" (OuterVolumeSpecName: "config-data") pod "be47322e-8d8e-47c7-b0c6-c0de22806d81" (UID: "be47322e-8d8e-47c7-b0c6-c0de22806d81"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.458977 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a93a319c-d470-4c91-8fcc-288bb6b6119d" path="/var/lib/kubelet/pods/a93a319c-d470-4c91-8fcc-288bb6b6119d/volumes" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.507557 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.507618 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be47322e-8d8e-47c7-b0c6-c0de22806d81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.796824 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.866755 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.886474 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.886696 3552 topology_manager.go:215] "Topology Admit Handler" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.887043 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="sg-core" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887056 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="sg-core" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.887112 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-central-agent" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887124 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-central-agent" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.887149 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="proxy-httpd" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887157 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="proxy-httpd" Mar 20 15:49:03 crc kubenswrapper[3552]: E0320 15:49:03.887167 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-notification-agent" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887176 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-notification-agent" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887383 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-notification-agent" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887433 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="ceilometer-central-agent" Mar 20 15:49:03 crc 
kubenswrapper[3552]: I0320 15:49:03.887449 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="sg-core" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.887472 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" containerName="proxy-httpd" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.891529 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.898833 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.905813 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:49:03 crc kubenswrapper[3552]: I0320 15:49:03.906021 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.045935 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.046324 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-run-httpd\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.046367 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-log-httpd\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.046427 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-config-data\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.046717 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc2g8\" (UniqueName: \"kubernetes.io/projected/eb9e0409-75f5-47bf-a85f-27c8ed63faec-kube-api-access-gc2g8\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.046811 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.046887 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-scripts\") pod \"ceilometer-0\" (UID: 
\"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.128794 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dc4c2a50-03a3-449c-a797-9f582a546642","Type":"ContainerStarted","Data":"8f5c9b67b5f5b8e7408be1dd5ce4379ec13dcd233c3c719e9c200b2a858b8642"} Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.128966 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api-log" containerID="cri-o://faa13e4909c7f27c43f26d7faf1ff882bc40db36a83156367863990e5c4ad153" gracePeriod=30 Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.129187 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.129483 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api" containerID="cri-o://8f5c9b67b5f5b8e7408be1dd5ce4379ec13dcd233c3c719e9c200b2a858b8642" gracePeriod=30 Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150602 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150648 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-scripts\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150713 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150749 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-run-httpd\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150779 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-log-httpd\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150810 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-config-data\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150859 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.15081806 podStartE2EDuration="6.15081806s" 
podCreationTimestamp="2026-03-20 15:48:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:04.150751128 +0000 UTC m=+1443.844447978" watchObservedRunningTime="2026-03-20 15:49:04.15081806 +0000 UTC m=+1443.844514890" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.150900 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gc2g8\" (UniqueName: \"kubernetes.io/projected/eb9e0409-75f5-47bf-a85f-27c8ed63faec-kube-api-access-gc2g8\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.151565 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-run-httpd\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.151573 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-log-httpd\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.155921 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" event={"ID":"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d","Type":"ContainerStarted","Data":"723b0950289893a7d1926deee7c09bc806fb812a7c061e0b51e3f7aa3bb0aa36"} Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.156126 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.159602 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.159708 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.160768 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-scripts\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.160927 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-config-data\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.162652 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6bf8c5549-pmzmz" 
event={"ID":"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775","Type":"ContainerStarted","Data":"dd468d4a1825eedff3454c762d76c9954f8e3fa8d590393431b4a7fba321c88d"} Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.173184 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc2g8\" (UniqueName: \"kubernetes.io/projected/eb9e0409-75f5-47bf-a85f-27c8ed63faec-kube-api-access-gc2g8\") pod \"ceilometer-0\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.188934 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" podStartSLOduration=6.188873896 podStartE2EDuration="6.188873896s" podCreationTimestamp="2026-03-20 15:48:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:04.184747216 +0000 UTC m=+1443.878444056" watchObservedRunningTime="2026-03-20 15:49:04.188873896 +0000 UTC m=+1443.882570726" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.219447 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5675b7c4bb-854mw"] Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.268785 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.801131 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.801659 3552 topology_manager.go:215] "Topology Admit Handler" podUID="1137d7d3-a9ff-4002-9b7f-174802428ba7" podNamespace="openstack" podName="openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.804277 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.808230 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-fb844" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.811796 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.811996 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.812842 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.867522 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1137d7d3-a9ff-4002-9b7f-174802428ba7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.867729 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1137d7d3-a9ff-4002-9b7f-174802428ba7-openstack-config\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.867785 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1137d7d3-a9ff-4002-9b7f-174802428ba7-openstack-config-secret\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.867807 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzwfr\" (UniqueName: \"kubernetes.io/projected/1137d7d3-a9ff-4002-9b7f-174802428ba7-kube-api-access-pzwfr\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.940222 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.976112 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1137d7d3-a9ff-4002-9b7f-174802428ba7-openstack-config-secret\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.976329 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pzwfr\" (UniqueName: \"kubernetes.io/projected/1137d7d3-a9ff-4002-9b7f-174802428ba7-kube-api-access-pzwfr\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.976670 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1137d7d3-a9ff-4002-9b7f-174802428ba7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " 
pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.977072 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1137d7d3-a9ff-4002-9b7f-174802428ba7-openstack-config\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.981637 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1137d7d3-a9ff-4002-9b7f-174802428ba7-openstack-config\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:04 crc kubenswrapper[3552]: I0320 15:49:04.987260 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1137d7d3-a9ff-4002-9b7f-174802428ba7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:04.995906 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzwfr\" (UniqueName: \"kubernetes.io/projected/1137d7d3-a9ff-4002-9b7f-174802428ba7-kube-api-access-pzwfr\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:04.996270 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1137d7d3-a9ff-4002-9b7f-174802428ba7-openstack-config-secret\") pod \"openstackclient\" (UID: \"1137d7d3-a9ff-4002-9b7f-174802428ba7\") " pod="openstack/openstackclient" Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.143190 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.193015 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5675b7c4bb-854mw" event={"ID":"798beca9-b89f-42b2-9e24-cf98a854b880","Type":"ContainerStarted","Data":"f7351ccf22bf7e548fe9d79ef58b40c25e2e64722e572ddacc9ebe4e987b6de3"} Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.193050 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5675b7c4bb-854mw" event={"ID":"798beca9-b89f-42b2-9e24-cf98a854b880","Type":"ContainerStarted","Data":"b490090fcc64f2cfefdf0476b55477039fc921d5c93f6f0819e7eb511b91e486"} Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.195172 3552 generic.go:334] "Generic (PLEG): container finished" podID="dc4c2a50-03a3-449c-a797-9f582a546642" containerID="faa13e4909c7f27c43f26d7faf1ff882bc40db36a83156367863990e5c4ad153" exitCode=143 Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.195220 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dc4c2a50-03a3-449c-a797-9f582a546642","Type":"ContainerDied","Data":"faa13e4909c7f27c43f26d7faf1ff882bc40db36a83156367863990e5c4ad153"} Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.197338 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad03da73-fd35-414a-b640-6dc7bcc40c24","Type":"ContainerStarted","Data":"bd435adf5f23060bbe321642c8084bd50728647eb3c84680d2daeceb0f2ac56d"} Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.212865 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerStarted","Data":"13d60e5202a12cd945a4c9f5f0a11327ad51a4eb1e264c22e9ee84c845d0e38b"} Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.217710 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6bf8c5549-pmzmz" event={"ID":"6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775","Type":"ContainerStarted","Data":"2439cd43b97175897594246619cca24f2f4e001887964935db7ac1481f46eaf9"} Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.270749 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/barbican-worker-6bf8c5549-pmzmz" podStartSLOduration=4.92990575 podStartE2EDuration="9.2706989s" podCreationTimestamp="2026-03-20 15:48:56 +0000 UTC" firstStartedPulling="2026-03-20 15:48:58.471818186 +0000 UTC m=+1438.165515016" lastFinishedPulling="2026-03-20 15:49:02.812611336 +0000 UTC m=+1442.506308166" observedRunningTime="2026-03-20 15:49:05.265157142 +0000 UTC m=+1444.958853972" watchObservedRunningTime="2026-03-20 15:49:05.2706989 +0000 UTC m=+1444.964395730" Mar 20 15:49:05 crc kubenswrapper[3552]: I0320 15:49:05.480019 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be47322e-8d8e-47c7-b0c6-c0de22806d81" path="/var/lib/kubelet/pods/be47322e-8d8e-47c7-b0c6-c0de22806d81/volumes" Mar 20 15:49:06 crc kubenswrapper[3552]: I0320 15:49:06.122661 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Mar 20 15:49:06 crc kubenswrapper[3552]: W0320 15:49:06.180815 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1137d7d3_a9ff_4002_9b7f_174802428ba7.slice/crio-58e7b34dbd57a4ae1bd9cb7cf5c21bd3a4a5d10e6993d97633937c80dd511c0f WatchSource:0}: Error finding container 
58e7b34dbd57a4ae1bd9cb7cf5c21bd3a4a5d10e6993d97633937c80dd511c0f: Status 404 returned error can't find the container with id 58e7b34dbd57a4ae1bd9cb7cf5c21bd3a4a5d10e6993d97633937c80dd511c0f Mar 20 15:49:06 crc kubenswrapper[3552]: I0320 15:49:06.241907 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"1137d7d3-a9ff-4002-9b7f-174802428ba7","Type":"ContainerStarted","Data":"58e7b34dbd57a4ae1bd9cb7cf5c21bd3a4a5d10e6993d97633937c80dd511c0f"} Mar 20 15:49:06 crc kubenswrapper[3552]: I0320 15:49:06.249582 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5675b7c4bb-854mw" event={"ID":"798beca9-b89f-42b2-9e24-cf98a854b880","Type":"ContainerStarted","Data":"d811bec71c434a5d8273a6ca5df7f22016f00f6ddd43a890bb222f79052904ee"} Mar 20 15:49:06 crc kubenswrapper[3552]: I0320 15:49:06.249642 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5675b7c4bb-854mw" Mar 20 15:49:06 crc kubenswrapper[3552]: I0320 15:49:06.249692 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5675b7c4bb-854mw" Mar 20 15:49:06 crc kubenswrapper[3552]: I0320 15:49:06.273780 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/barbican-api-5675b7c4bb-854mw" podStartSLOduration=4.273740811 podStartE2EDuration="4.273740811s" podCreationTimestamp="2026-03-20 15:49:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:06.272739784 +0000 UTC m=+1445.966436624" watchObservedRunningTime="2026-03-20 15:49:06.273740811 +0000 UTC m=+1445.967437641" Mar 20 15:49:07 crc kubenswrapper[3552]: I0320 15:49:07.276194 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad03da73-fd35-414a-b640-6dc7bcc40c24","Type":"ContainerStarted","Data":"e572c97a1f71b5e4c83ab669854c42f4c6ac81c46f5362714ad96eee8820b0a2"} Mar 20 15:49:07 crc kubenswrapper[3552]: I0320 15:49:07.283534 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerStarted","Data":"fec1b8e4712d24f429fa20c1b0ca8684d317f2d6ca8e30590c3f2c10f989a2db"} Mar 20 15:49:07 crc kubenswrapper[3552]: I0320 15:49:07.703893 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:49:08 crc kubenswrapper[3552]: I0320 15:49:08.315518 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerStarted","Data":"d26206b01dd63cec457fe26724ea78e7afed566a9ce8227dcc7b14f3b347c610"} Mar 20 15:49:08 crc kubenswrapper[3552]: I0320 15:49:08.317134 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerStarted","Data":"8379b831372e299be8e32262ac2e629065764cd1411971206a4142af3fbfab45"} Mar 20 15:49:08 crc kubenswrapper[3552]: I0320 15:49:08.340638 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.787755731 podStartE2EDuration="10.340594395s" podCreationTimestamp="2026-03-20 15:48:58 +0000 UTC" firstStartedPulling="2026-03-20 15:49:00.275580714 +0000 UTC m=+1439.969277544" lastFinishedPulling="2026-03-20 15:49:03.828419378 +0000 UTC 
m=+1443.522116208" observedRunningTime="2026-03-20 15:49:08.333031063 +0000 UTC m=+1448.026727893" watchObservedRunningTime="2026-03-20 15:49:08.340594395 +0000 UTC m=+1448.034291225" Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.172607 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.262026 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.279593 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fb5d4fb47-48ntc"] Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.279793 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" containerName="dnsmasq-dns" containerID="cri-o://b826c65c836ae95217d65ecde58da66d567aaa03ff58b9fb38948adbbc1a6f3d" gracePeriod=10 Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.345510 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerStarted","Data":"32dcc5ca5dc0d673a0df3d56ea3d97d04e7b45cd0f5fb38962c6086820553fc9"} Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.345596 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:49:09 crc kubenswrapper[3552]: I0320 15:49:09.391266 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7616499230000002 podStartE2EDuration="6.381120567s" podCreationTimestamp="2026-03-20 15:49:03 +0000 UTC" firstStartedPulling="2026-03-20 15:49:04.965339074 +0000 UTC m=+1444.659035904" lastFinishedPulling="2026-03-20 15:49:08.584809718 +0000 UTC m=+1448.278506548" observedRunningTime="2026-03-20 15:49:09.378544128 +0000 UTC m=+1449.072240968" watchObservedRunningTime="2026-03-20 15:49:09.381120567 +0000 UTC m=+1449.074817397" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.364338 3552 generic.go:334] "Generic (PLEG): container finished" podID="8a910859-d35b-4395-a991-3d073b07f9e2" containerID="b826c65c836ae95217d65ecde58da66d567aaa03ff58b9fb38948adbbc1a6f3d" exitCode=0 Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.365388 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" event={"ID":"8a910859-d35b-4395-a991-3d073b07f9e2","Type":"ContainerDied","Data":"b826c65c836ae95217d65ecde58da66d567aaa03ff58b9fb38948adbbc1a6f3d"} Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.580578 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.759658 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.768755 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-sb\") pod \"8a910859-d35b-4395-a991-3d073b07f9e2\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.768857 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-swift-storage-0\") pod \"8a910859-d35b-4395-a991-3d073b07f9e2\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.768937 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-config\") pod \"8a910859-d35b-4395-a991-3d073b07f9e2\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.769031 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-nb\") pod \"8a910859-d35b-4395-a991-3d073b07f9e2\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.769584 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgrfh\" (UniqueName: \"kubernetes.io/projected/8a910859-d35b-4395-a991-3d073b07f9e2-kube-api-access-dgrfh\") pod \"8a910859-d35b-4395-a991-3d073b07f9e2\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.769648 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-svc\") pod \"8a910859-d35b-4395-a991-3d073b07f9e2\" (UID: \"8a910859-d35b-4395-a991-3d073b07f9e2\") " Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.780445 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a910859-d35b-4395-a991-3d073b07f9e2-kube-api-access-dgrfh" (OuterVolumeSpecName: "kube-api-access-dgrfh") pod "8a910859-d35b-4395-a991-3d073b07f9e2" (UID: "8a910859-d35b-4395-a991-3d073b07f9e2"). InnerVolumeSpecName "kube-api-access-dgrfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.848170 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8a910859-d35b-4395-a991-3d073b07f9e2" (UID: "8a910859-d35b-4395-a991-3d073b07f9e2"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.872060 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.872094 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-dgrfh\" (UniqueName: \"kubernetes.io/projected/8a910859-d35b-4395-a991-3d073b07f9e2-kube-api-access-dgrfh\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.915086 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8a910859-d35b-4395-a991-3d073b07f9e2" (UID: "8a910859-d35b-4395-a991-3d073b07f9e2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.918608 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8a910859-d35b-4395-a991-3d073b07f9e2" (UID: "8a910859-d35b-4395-a991-3d073b07f9e2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.953670 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-config" (OuterVolumeSpecName: "config") pod "8a910859-d35b-4395-a991-3d073b07f9e2" (UID: "8a910859-d35b-4395-a991-3d073b07f9e2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.974763 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.974791 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:10 crc kubenswrapper[3552]: I0320 15:49:10.974801 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.002007 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8a910859-d35b-4395-a991-3d073b07f9e2" (UID: "8a910859-d35b-4395-a991-3d073b07f9e2"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.082109 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a910859-d35b-4395-a991-3d073b07f9e2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.380926 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" event={"ID":"8a910859-d35b-4395-a991-3d073b07f9e2","Type":"ContainerDied","Data":"17ebec0f37f88894edad786985ff181a2016327aae9cb48ecd218b6d1aea2d3a"} Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.380933 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fb5d4fb47-48ntc" Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.380976 3552 scope.go:117] "RemoveContainer" containerID="b826c65c836ae95217d65ecde58da66d567aaa03ff58b9fb38948adbbc1a6f3d" Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.465841 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fb5d4fb47-48ntc"] Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.470561 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fb5d4fb47-48ntc"] Mar 20 15:49:11 crc kubenswrapper[3552]: I0320 15:49:11.482806 3552 scope.go:117] "RemoveContainer" containerID="c73775c4577ae22dc883186ebe48adc86671ffcc43571f66d3724b668f1584dc" Mar 20 15:49:12 crc kubenswrapper[3552]: I0320 15:49:12.070297 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Mar 20 15:49:12 crc kubenswrapper[3552]: I0320 15:49:12.779146 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:49:12 crc kubenswrapper[3552]: I0320 15:49:12.779470 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.453191 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" path="/var/lib/kubelet/pods/8a910859-d35b-4395-a991-3d073b07f9e2/volumes" Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.467475 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-66bd8f794-2dbwx"] Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.469480 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/neutron-66bd8f794-2dbwx" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-api" containerID="cri-o://87ed38b7065d811248c493011799a72fd06e1ec3280d05d12fd19718f0afe37d" gracePeriod=30 Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.470132 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/neutron-66bd8f794-2dbwx" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" containerID="cri-o://b756336336337216255047984af6d67759643f9b8364310287b066f2d973badc" gracePeriod=30 
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.509894 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-66bd8f794-2dbwx"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.521663 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-7558f7559f-rntkm"]
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.528831 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b9a18de2-c36e-4cfc-af47-d5143257da26" podNamespace="openstack" podName="neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: E0320 15:49:13.530835 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" containerName="dnsmasq-dns"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.540880 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" containerName="dnsmasq-dns"
Mar 20 15:49:13 crc kubenswrapper[3552]: E0320 15:49:13.541148 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" containerName="init"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.541214 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" containerName="init"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.541634 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a910859-d35b-4395-a991-3d073b07f9e2" containerName="dnsmasq-dns"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.544562 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.589532 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7558f7559f-rntkm"]
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643231 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-combined-ca-bundle\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643302 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-httpd-config\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643330 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-public-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643373 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-ovndb-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643580 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-internal-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643713 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-config\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.643781 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92v82\" (UniqueName: \"kubernetes.io/projected/b9a18de2-c36e-4cfc-af47-d5143257da26-kube-api-access-92v82\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.746385 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-combined-ca-bundle\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.746917 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-httpd-config\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.746947 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-public-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.746971 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-ovndb-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.746994 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-internal-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.747020 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-config\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.747057 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-92v82\" (UniqueName: \"kubernetes.io/projected/b9a18de2-c36e-4cfc-af47-d5143257da26-kube-api-access-92v82\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.755331 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-config\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.759248 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-ovndb-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.760782 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-combined-ca-bundle\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.761192 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-httpd-config\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.763150 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-public-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.770072 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a18de2-c36e-4cfc-af47-d5143257da26-internal-tls-certs\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.777085 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-92v82\" (UniqueName: \"kubernetes.io/projected/b9a18de2-c36e-4cfc-af47-d5143257da26-kube-api-access-92v82\") pod \"neutron-7558f7559f-rntkm\" (UID: \"b9a18de2-c36e-4cfc-af47-d5143257da26\") " pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:13 crc kubenswrapper[3552]: I0320 15:49:13.884917 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7558f7559f-rntkm"
Mar 20 15:49:14 crc kubenswrapper[3552]: I0320 15:49:14.435675 3552 generic.go:334] "Generic (PLEG): container finished" podID="14da306a-fa6c-460e-af02-1180237b4366" containerID="b756336336337216255047984af6d67759643f9b8364310287b066f2d973badc" exitCode=0
Mar 20 15:49:14 crc kubenswrapper[3552]: I0320 15:49:14.436008 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66bd8f794-2dbwx" event={"ID":"14da306a-fa6c-460e-af02-1180237b4366","Type":"ContainerDied","Data":"b756336336337216255047984af6d67759643f9b8364310287b066f2d973badc"}
Mar 20 15:49:14 crc kubenswrapper[3552]: I0320 15:49:14.722631 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Mar 20 15:49:14 crc kubenswrapper[3552]: I0320 15:49:14.759183 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Mar 20 15:49:15 crc kubenswrapper[3552]: I0320 15:49:15.448472 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="cinder-scheduler" containerID="cri-o://bd435adf5f23060bbe321642c8084bd50728647eb3c84680d2daeceb0f2ac56d" gracePeriod=30
Mar 20 15:49:15 crc kubenswrapper[3552]: I0320 15:49:15.449199 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="probe" containerID="cri-o://e572c97a1f71b5e4c83ab669854c42f4c6ac81c46f5362714ad96eee8820b0a2" gracePeriod=30
Mar 20 15:49:15 crc kubenswrapper[3552]: I0320 15:49:15.984794 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.256527 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5675b7c4bb-854mw"
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.351032 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-646bf87fb6-8jzf5"]
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.351282 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/barbican-api-646bf87fb6-8jzf5" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api-log" containerID="cri-o://383291e8f72b47ee8f91223e260e8b26fff6b0bacbf3ddb6b409283fce1ea761" gracePeriod=30
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.351802 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/barbican-api-646bf87fb6-8jzf5" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api" containerID="cri-o://1a25d46c2c3847c500e9ffebda613dd5c4d882950b3daa4abbd3fb18d1e3ac5b" gracePeriod=30
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.881419 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.881924 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-central-agent" containerID="cri-o://fec1b8e4712d24f429fa20c1b0ca8684d317f2d6ca8e30590c3f2c10f989a2db" gracePeriod=30
Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.882003 3552 kuberuntime_container.go:770] "Killing container with a grace period"
pod="openstack/ceilometer-0" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-notification-agent" containerID="cri-o://8379b831372e299be8e32262ac2e629065764cd1411971206a4142af3fbfab45" gracePeriod=30 Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.882014 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="proxy-httpd" containerID="cri-o://32dcc5ca5dc0d673a0df3d56ea3d97d04e7b45cd0f5fb38962c6086820553fc9" gracePeriod=30 Mar 20 15:49:16 crc kubenswrapper[3552]: I0320 15:49:16.882003 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="sg-core" containerID="cri-o://d26206b01dd63cec457fe26724ea78e7afed566a9ce8227dcc7b14f3b347c610" gracePeriod=30 Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.491628 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-746656bf7-ktbs6"] Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.492062 3552 topology_manager.go:215] "Topology Admit Handler" podUID="444a7139-f192-4a10-8047-c84d83d05dab" podNamespace="openstack" podName="swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.493545 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.502278 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.502684 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.502852 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.535461 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-746656bf7-ktbs6"] Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.548477 3552 generic.go:334] "Generic (PLEG): container finished" podID="20f75298-10c5-4400-870b-472a9f870f0b" containerID="383291e8f72b47ee8f91223e260e8b26fff6b0bacbf3ddb6b409283fce1ea761" exitCode=143 Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.548612 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-646bf87fb6-8jzf5" event={"ID":"20f75298-10c5-4400-870b-472a9f870f0b","Type":"ContainerDied","Data":"383291e8f72b47ee8f91223e260e8b26fff6b0bacbf3ddb6b409283fce1ea761"} Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.553917 3552 generic.go:334] "Generic (PLEG): container finished" podID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerID="e572c97a1f71b5e4c83ab669854c42f4c6ac81c46f5362714ad96eee8820b0a2" exitCode=0 Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.554013 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad03da73-fd35-414a-b640-6dc7bcc40c24","Type":"ContainerDied","Data":"e572c97a1f71b5e4c83ab669854c42f4c6ac81c46f5362714ad96eee8820b0a2"} Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.581195 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerID="32dcc5ca5dc0d673a0df3d56ea3d97d04e7b45cd0f5fb38962c6086820553fc9" exitCode=0 Mar 20 15:49:17 crc 
kubenswrapper[3552]: I0320 15:49:17.581221 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerID="d26206b01dd63cec457fe26724ea78e7afed566a9ce8227dcc7b14f3b347c610" exitCode=2 Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.581245 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerDied","Data":"32dcc5ca5dc0d673a0df3d56ea3d97d04e7b45cd0f5fb38962c6086820553fc9"} Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.581267 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerDied","Data":"d26206b01dd63cec457fe26724ea78e7afed566a9ce8227dcc7b14f3b347c610"} Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.641947 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-public-tls-certs\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.642258 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-combined-ca-bundle\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.642385 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/444a7139-f192-4a10-8047-c84d83d05dab-run-httpd\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.642614 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-config-data\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.642788 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/444a7139-f192-4a10-8047-c84d83d05dab-log-httpd\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.642898 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-internal-tls-certs\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.644084 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6smq4\" (UniqueName: \"kubernetes.io/projected/444a7139-f192-4a10-8047-c84d83d05dab-kube-api-access-6smq4\") pod 
\"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.644148 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/444a7139-f192-4a10-8047-c84d83d05dab-etc-swift\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: E0320 15:49:17.703961 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb9e0409_75f5_47bf_a85f_27c8ed63faec.slice/crio-fec1b8e4712d24f429fa20c1b0ca8684d317f2d6ca8e30590c3f2c10f989a2db.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb9e0409_75f5_47bf_a85f_27c8ed63faec.slice/crio-8379b831372e299be8e32262ac2e629065764cd1411971206a4142af3fbfab45.scope\": RecentStats: unable to find data in memory cache]" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748548 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-6smq4\" (UniqueName: \"kubernetes.io/projected/444a7139-f192-4a10-8047-c84d83d05dab-kube-api-access-6smq4\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748609 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/444a7139-f192-4a10-8047-c84d83d05dab-etc-swift\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748672 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-combined-ca-bundle\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748696 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-public-tls-certs\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748728 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/444a7139-f192-4a10-8047-c84d83d05dab-run-httpd\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748749 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-config-data\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748794 
3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/444a7139-f192-4a10-8047-c84d83d05dab-log-httpd\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.748815 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-internal-tls-certs\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.752221 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/444a7139-f192-4a10-8047-c84d83d05dab-log-httpd\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.752807 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/444a7139-f192-4a10-8047-c84d83d05dab-run-httpd\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.756750 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-internal-tls-certs\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.758364 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/444a7139-f192-4a10-8047-c84d83d05dab-etc-swift\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.759369 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-public-tls-certs\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.760127 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-config-data\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.762979 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a7139-f192-4a10-8047-c84d83d05dab-combined-ca-bundle\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.777183 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-6smq4\" (UniqueName: 
\"kubernetes.io/projected/444a7139-f192-4a10-8047-c84d83d05dab-kube-api-access-6smq4\") pod \"swift-proxy-746656bf7-ktbs6\" (UID: \"444a7139-f192-4a10-8047-c84d83d05dab\") " pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:17 crc kubenswrapper[3552]: I0320 15:49:17.887310 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:18 crc kubenswrapper[3552]: I0320 15:49:18.596205 3552 generic.go:334] "Generic (PLEG): container finished" podID="14da306a-fa6c-460e-af02-1180237b4366" containerID="87ed38b7065d811248c493011799a72fd06e1ec3280d05d12fd19718f0afe37d" exitCode=0 Mar 20 15:49:18 crc kubenswrapper[3552]: I0320 15:49:18.596544 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66bd8f794-2dbwx" event={"ID":"14da306a-fa6c-460e-af02-1180237b4366","Type":"ContainerDied","Data":"87ed38b7065d811248c493011799a72fd06e1ec3280d05d12fd19718f0afe37d"} Mar 20 15:49:18 crc kubenswrapper[3552]: I0320 15:49:18.599435 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerID="8379b831372e299be8e32262ac2e629065764cd1411971206a4142af3fbfab45" exitCode=0 Mar 20 15:49:18 crc kubenswrapper[3552]: I0320 15:49:18.599464 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerID="fec1b8e4712d24f429fa20c1b0ca8684d317f2d6ca8e30590c3f2c10f989a2db" exitCode=0 Mar 20 15:49:18 crc kubenswrapper[3552]: I0320 15:49:18.599482 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerDied","Data":"8379b831372e299be8e32262ac2e629065764cd1411971206a4142af3fbfab45"} Mar 20 15:49:18 crc kubenswrapper[3552]: I0320 15:49:18.599500 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerDied","Data":"fec1b8e4712d24f429fa20c1b0ca8684d317f2d6ca8e30590c3f2c10f989a2db"} Mar 20 15:49:19 crc kubenswrapper[3552]: I0320 15:49:19.851460 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-646bf87fb6-8jzf5" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": read tcp 10.217.0.2:41578->10.217.0.178:9311: read: connection reset by peer" Mar 20 15:49:19 crc kubenswrapper[3552]: I0320 15:49:19.851504 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-646bf87fb6-8jzf5" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": read tcp 10.217.0.2:41576->10.217.0.178:9311: read: connection reset by peer" Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.034311 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.034891 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" containerName="watcher-decision-engine" containerID="cri-o://d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36" gracePeriod=30 Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.616795 3552 generic.go:334] "Generic (PLEG): container finished" 
podID="20f75298-10c5-4400-870b-472a9f870f0b" containerID="1a25d46c2c3847c500e9ffebda613dd5c4d882950b3daa4abbd3fb18d1e3ac5b" exitCode=0 Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.616980 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-646bf87fb6-8jzf5" event={"ID":"20f75298-10c5-4400-870b-472a9f870f0b","Type":"ContainerDied","Data":"1a25d46c2c3847c500e9ffebda613dd5c4d882950b3daa4abbd3fb18d1e3ac5b"} Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.620325 3552 generic.go:334] "Generic (PLEG): container finished" podID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerID="bd435adf5f23060bbe321642c8084bd50728647eb3c84680d2daeceb0f2ac56d" exitCode=0 Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.620350 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad03da73-fd35-414a-b640-6dc7bcc40c24","Type":"ContainerDied","Data":"bd435adf5f23060bbe321642c8084bd50728647eb3c84680d2daeceb0f2ac56d"} Mar 20 15:49:20 crc kubenswrapper[3552]: I0320 15:49:20.659671 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-66bd8f794-2dbwx" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" probeResult="failure" output="Get \"http://10.217.0.168:9696/\": dial tcp 10.217.0.168:9696: connect: connection refused" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.363033 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:49:21 crc kubenswrapper[3552]: E0320 15:49:21.383672 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Mar 20 15:49:21 crc kubenswrapper[3552]: E0320 15:49:21.406700 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Mar 20 15:49:21 crc kubenswrapper[3552]: E0320 15:49:21.413552 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Mar 20 15:49:21 crc kubenswrapper[3552]: E0320 15:49:21.413606 3552 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-decision-engine-0" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" containerName="watcher-decision-engine" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.423119 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcltb\" (UniqueName: \"kubernetes.io/projected/20f75298-10c5-4400-870b-472a9f870f0b-kube-api-access-wcltb\") pod \"20f75298-10c5-4400-870b-472a9f870f0b\" (UID: 
\"20f75298-10c5-4400-870b-472a9f870f0b\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.423179 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data\") pod \"20f75298-10c5-4400-870b-472a9f870f0b\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.423425 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data-custom\") pod \"20f75298-10c5-4400-870b-472a9f870f0b\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.423458 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f75298-10c5-4400-870b-472a9f870f0b-logs\") pod \"20f75298-10c5-4400-870b-472a9f870f0b\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.423480 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-combined-ca-bundle\") pod \"20f75298-10c5-4400-870b-472a9f870f0b\" (UID: \"20f75298-10c5-4400-870b-472a9f870f0b\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.429450 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20f75298-10c5-4400-870b-472a9f870f0b-logs" (OuterVolumeSpecName: "logs") pod "20f75298-10c5-4400-870b-472a9f870f0b" (UID: "20f75298-10c5-4400-870b-472a9f870f0b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.434258 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "20f75298-10c5-4400-870b-472a9f870f0b" (UID: "20f75298-10c5-4400-870b-472a9f870f0b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.446340 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20f75298-10c5-4400-870b-472a9f870f0b-kube-api-access-wcltb" (OuterVolumeSpecName: "kube-api-access-wcltb") pod "20f75298-10c5-4400-870b-472a9f870f0b" (UID: "20f75298-10c5-4400-870b-472a9f870f0b"). InnerVolumeSpecName "kube-api-access-wcltb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.531326 3552 reconciler_common.go:300] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data-custom\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.531359 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f75298-10c5-4400-870b-472a9f870f0b-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.531372 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wcltb\" (UniqueName: \"kubernetes.io/projected/20f75298-10c5-4400-870b-472a9f870f0b-kube-api-access-wcltb\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.555047 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data" (OuterVolumeSpecName: "config-data") pod "20f75298-10c5-4400-870b-472a9f870f0b" (UID: "20f75298-10c5-4400-870b-472a9f870f0b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.558281 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20f75298-10c5-4400-870b-472a9f870f0b" (UID: "20f75298-10c5-4400-870b-472a9f870f0b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.632914 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.632948 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20f75298-10c5-4400-870b-472a9f870f0b-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.635229 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.676819 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-646bf87fb6-8jzf5" event={"ID":"20f75298-10c5-4400-870b-472a9f870f0b","Type":"ContainerDied","Data":"aeea39515df71ede20717ff24d3b369b8c2cb67df9909630a08148a07cabdc9a"} Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.676871 3552 scope.go:117] "RemoveContainer" containerID="1a25d46c2c3847c500e9ffebda613dd5c4d882950b3daa4abbd3fb18d1e3ac5b" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.677095 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-646bf87fb6-8jzf5" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.708674 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.730189 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.730203 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad03da73-fd35-414a-b640-6dc7bcc40c24","Type":"ContainerDied","Data":"78313e4ada6a532081093799dc0fdc861f376f47241485ba6e2988a647aeea35"} Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.744243 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.765855 3552 generic.go:334] "Generic (PLEG): container finished" podID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerID="3f4231a6bdea50fa6b04a73dff18ca3f8de0f2b3d05f0819f7ee3850852822ca" exitCode=137 Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.765940 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f9cbcd486-gj8tz" event={"ID":"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a","Type":"ContainerDied","Data":"3f4231a6bdea50fa6b04a73dff18ca3f8de0f2b3d05f0819f7ee3850852822ca"} Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.787229 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb9e0409-75f5-47bf-a85f-27c8ed63faec","Type":"ContainerDied","Data":"13d60e5202a12cd945a4c9f5f0a11327ad51a4eb1e264c22e9ee84c845d0e38b"} Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.787341 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.806461 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-646bf87fb6-8jzf5"] Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.819582 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-646bf87fb6-8jzf5"] Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837167 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-combined-ca-bundle\") pod \"ad03da73-fd35-414a-b640-6dc7bcc40c24\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837245 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc2g8\" (UniqueName: \"kubernetes.io/projected/eb9e0409-75f5-47bf-a85f-27c8ed63faec-kube-api-access-gc2g8\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837281 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-scripts\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837324 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-config-data\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837351 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-scripts\") pod \"ad03da73-fd35-414a-b640-6dc7bcc40c24\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837419 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-log-httpd\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837454 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m2td\" (UniqueName: \"kubernetes.io/projected/ad03da73-fd35-414a-b640-6dc7bcc40c24-kube-api-access-7m2td\") pod \"ad03da73-fd35-414a-b640-6dc7bcc40c24\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837558 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-sg-core-conf-yaml\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837604 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-combined-ca-bundle\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837634 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data-custom\") pod \"ad03da73-fd35-414a-b640-6dc7bcc40c24\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837685 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-run-httpd\") pod \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\" (UID: \"eb9e0409-75f5-47bf-a85f-27c8ed63faec\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837747 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data\") pod \"ad03da73-fd35-414a-b640-6dc7bcc40c24\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.837798 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad03da73-fd35-414a-b640-6dc7bcc40c24-etc-machine-id\") pod \"ad03da73-fd35-414a-b640-6dc7bcc40c24\" (UID: \"ad03da73-fd35-414a-b640-6dc7bcc40c24\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.838310 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ad03da73-fd35-414a-b640-6dc7bcc40c24-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ad03da73-fd35-414a-b640-6dc7bcc40c24" (UID: "ad03da73-fd35-414a-b640-6dc7bcc40c24"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.838568 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.839116 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.854510 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ad03da73-fd35-414a-b640-6dc7bcc40c24" (UID: "ad03da73-fd35-414a-b640-6dc7bcc40c24"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.854633 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb9e0409-75f5-47bf-a85f-27c8ed63faec-kube-api-access-gc2g8" (OuterVolumeSpecName: "kube-api-access-gc2g8") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "kube-api-access-gc2g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.854698 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-scripts" (OuterVolumeSpecName: "scripts") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.855044 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-scripts" (OuterVolumeSpecName: "scripts") pod "ad03da73-fd35-414a-b640-6dc7bcc40c24" (UID: "ad03da73-fd35-414a-b640-6dc7bcc40c24"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.875859 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad03da73-fd35-414a-b640-6dc7bcc40c24-kube-api-access-7m2td" (OuterVolumeSpecName: "kube-api-access-7m2td") pod "ad03da73-fd35-414a-b640-6dc7bcc40c24" (UID: "ad03da73-fd35-414a-b640-6dc7bcc40c24"). InnerVolumeSpecName "kube-api-access-7m2td". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.916489 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.929121 3552 scope.go:117] "RemoveContainer" containerID="383291e8f72b47ee8f91223e260e8b26fff6b0bacbf3ddb6b409283fce1ea761" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.939607 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-secret-key\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.939697 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-scripts\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.939780 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-combined-ca-bundle\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.939836 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nq6rj\" (UniqueName: \"kubernetes.io/projected/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-kube-api-access-nq6rj\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.939887 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-logs\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.939953 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-tls-certs\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.940037 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-config-data\") pod \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\" (UID: \"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a\") " Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943029 3552 reconciler_common.go:300] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data-custom\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943060 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943071 3552 reconciler_common.go:300] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad03da73-fd35-414a-b640-6dc7bcc40c24-etc-machine-id\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 
15:49:21.943082 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-gc2g8\" (UniqueName: \"kubernetes.io/projected/eb9e0409-75f5-47bf-a85f-27c8ed63faec-kube-api-access-gc2g8\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943092 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943271 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943280 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb9e0409-75f5-47bf-a85f-27c8ed63faec-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943290 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-7m2td\" (UniqueName: \"kubernetes.io/projected/ad03da73-fd35-414a-b640-6dc7bcc40c24-kube-api-access-7m2td\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.943301 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.950215 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-logs" (OuterVolumeSpecName: "logs") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.958682 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-kube-api-access-nq6rj" (OuterVolumeSpecName: "kube-api-access-nq6rj") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "kube-api-access-nq6rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.960887 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.967814 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:49:21 crc kubenswrapper[3552]: I0320 15:49:21.997049 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-config-data" (OuterVolumeSpecName: "config-data") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.001468 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad03da73-fd35-414a-b640-6dc7bcc40c24" (UID: "ad03da73-fd35-414a-b640-6dc7bcc40c24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.005268 3552 scope.go:117] "RemoveContainer" containerID="e572c97a1f71b5e4c83ab669854c42f4c6ac81c46f5362714ad96eee8820b0a2" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.016706 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.045103 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.045132 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.045143 3552 reconciler_common.go:300] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.045152 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.045163 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nq6rj\" (UniqueName: \"kubernetes.io/projected/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-kube-api-access-nq6rj\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.045173 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.052945 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7558f7559f-rntkm"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.065054 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-scripts" (OuterVolumeSpecName: "scripts") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.111961 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.116988 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" (UID: "dc2127a5-f3c4-4ae0-8cf3-918fd64c689a"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.143899 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data" (OuterVolumeSpecName: "config-data") pod "ad03da73-fd35-414a-b640-6dc7bcc40c24" (UID: "ad03da73-fd35-414a-b640-6dc7bcc40c24"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.147926 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-combined-ca-bundle\") pod \"14da306a-fa6c-460e-af02-1180237b4366\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.148718 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-config\") pod \"14da306a-fa6c-460e-af02-1180237b4366\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.148796 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9d7h\" (UniqueName: \"kubernetes.io/projected/14da306a-fa6c-460e-af02-1180237b4366-kube-api-access-f9d7h\") pod \"14da306a-fa6c-460e-af02-1180237b4366\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.148959 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-ovndb-tls-certs\") pod \"14da306a-fa6c-460e-af02-1180237b4366\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.149043 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-httpd-config\") pod \"14da306a-fa6c-460e-af02-1180237b4366\" (UID: \"14da306a-fa6c-460e-af02-1180237b4366\") " Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.150539 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad03da73-fd35-414a-b640-6dc7bcc40c24-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.150583 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.150595 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.150606 3552 reconciler_common.go:300] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.166104 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14da306a-fa6c-460e-af02-1180237b4366-kube-api-access-f9d7h" (OuterVolumeSpecName: "kube-api-access-f9d7h") pod "14da306a-fa6c-460e-af02-1180237b4366" (UID: "14da306a-fa6c-460e-af02-1180237b4366"). InnerVolumeSpecName "kube-api-access-f9d7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.172841 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "14da306a-fa6c-460e-af02-1180237b4366" (UID: "14da306a-fa6c-460e-af02-1180237b4366"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.251933 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-f9d7h\" (UniqueName: \"kubernetes.io/projected/14da306a-fa6c-460e-af02-1180237b4366-kube-api-access-f9d7h\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.251965 3552 reconciler_common.go:300] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-httpd-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.258497 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-config" (OuterVolumeSpecName: "config") pod "14da306a-fa6c-460e-af02-1180237b4366" (UID: "14da306a-fa6c-460e-af02-1180237b4366"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.275174 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-config-data" (OuterVolumeSpecName: "config-data") pod "eb9e0409-75f5-47bf-a85f-27c8ed63faec" (UID: "eb9e0409-75f5-47bf-a85f-27c8ed63faec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.298135 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14da306a-fa6c-460e-af02-1180237b4366" (UID: "14da306a-fa6c-460e-af02-1180237b4366"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.305824 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "14da306a-fa6c-460e-af02-1180237b4366" (UID: "14da306a-fa6c-460e-af02-1180237b4366"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.354031 3552 reconciler_common.go:300] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.354062 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9e0409-75f5-47bf-a85f-27c8ed63faec-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.354073 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.354084 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/14da306a-fa6c-460e-af02-1180237b4366-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.368519 3552 scope.go:117] "RemoveContainer" containerID="bd435adf5f23060bbe321642c8084bd50728647eb3c84680d2daeceb0f2ac56d" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.412494 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.416211 3552 scope.go:117] "RemoveContainer" containerID="75c1164319928057684acf21c1fff47d01d37d6c03dc88c0b8893dd430f2ca09" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.421905 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.442887 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443085 3552 topology_manager.go:215] "Topology Admit Handler" podUID="dd0f7af7-1f3c-4738-900a-d19e917f9a37" podNamespace="openstack" podName="cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443374 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="probe" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443391 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="probe" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443422 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-api" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443429 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-api" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443444 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon" Mar 20 15:49:22 
crc kubenswrapper[3552]: I0320 15:49:22.443454 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443472 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="sg-core" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443479 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="sg-core" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443493 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon-log" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443499 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon-log" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443511 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-central-agent" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443517 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-central-agent" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443529 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="proxy-httpd" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443535 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="proxy-httpd" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443544 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443550 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443561 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="cinder-scheduler" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443567 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="cinder-scheduler" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443576 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api-log" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443583 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api-log" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443594 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443600 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api" Mar 20 15:49:22 crc kubenswrapper[3552]: E0320 15:49:22.443611 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-notification-agent" Mar 20 
15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443619 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-notification-agent" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443786 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-httpd" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443796 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="sg-core" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443812 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon-log" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443842 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="14da306a-fa6c-460e-af02-1180237b4366" containerName="neutron-api" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443856 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="cinder-scheduler" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443867 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="proxy-httpd" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443877 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443885 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="20f75298-10c5-4400-870b-472a9f870f0b" containerName="barbican-api-log" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443895 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-central-agent" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443904 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" containerName="ceilometer-notification-agent" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443924 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" containerName="probe" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.443934 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" containerName="horizon" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.444886 3552 util.go:30] "No sandbox for pod can be found. 
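The interleaved cpu_manager/state_mem and memory_manager entries show the resource managers discarding per-container CPU-set and memory state for the pods just deleted, immediately before admitting the replacement cinder-scheduler-0; note the cpu_manager lines are emitted at E (error) severity even though this is routine cleanup. A small tally of which containers had stale state removed per pod UID, as a hypothetical helper over pre-split lines:

    from collections import defaultdict

    def stale_state_pods(lines):
        """Group container names by pod UID from RemoveStaleState log lines."""
        removed = defaultdict(set)
        for line in lines:
            if 'RemoveStaleState' not in line:
                continue
            pod = line.split('podUID="')[1].split('"')[0]
            container = line.split('containerName="')[1].split('"')[0]
            removed[pod].add(container)
        return removed

    # For the block above this would yield, e.g.,
    # {'eb9e0409-...': {'sg-core', 'proxy-httpd', 'ceilometer-central-agent', ...}, ...}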
Need to start a new one" pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.457779 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.474479 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.504351 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.521924 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.533185 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.533343 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.535274 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.538575 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.538809 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.553301 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565475 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-run-httpd\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565517 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-config-data\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565610 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2r2b\" (UniqueName: \"kubernetes.io/projected/dd0f7af7-1f3c-4738-900a-d19e917f9a37-kube-api-access-l2r2b\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565682 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565704 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-scripts\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " 
pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565724 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd0f7af7-1f3c-4738-900a-d19e917f9a37-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565788 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565812 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-config-data\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565874 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.565912 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.577677 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv9cv\" (UniqueName: \"kubernetes.io/projected/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-kube-api-access-mv9cv\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.577792 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-log-httpd\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.577904 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-scripts\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679229 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679526 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mv9cv\" (UniqueName: \"kubernetes.io/projected/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-kube-api-access-mv9cv\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679568 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-log-httpd\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679600 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-scripts\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679627 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-run-httpd\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679648 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-config-data\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679677 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-l2r2b\" (UniqueName: \"kubernetes.io/projected/dd0f7af7-1f3c-4738-900a-d19e917f9a37-kube-api-access-l2r2b\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679709 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679759 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-scripts\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679778 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd0f7af7-1f3c-4738-900a-d19e917f9a37-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679812 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 
15:49:22.679831 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-config-data\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.679860 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.681144 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-log-httpd\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.682291 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-run-httpd\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.682365 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd0f7af7-1f3c-4738-900a-d19e917f9a37-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.702920 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-scripts\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.710532 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.710860 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv9cv\" (UniqueName: \"kubernetes.io/projected/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-kube-api-access-mv9cv\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.714853 3552 scope.go:117] "RemoveContainer" containerID="3f4231a6bdea50fa6b04a73dff18ca3f8de0f2b3d05f0819f7ee3850852822ca" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.714945 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-config-data\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.716053 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2r2b\" (UniqueName: 
\"kubernetes.io/projected/dd0f7af7-1f3c-4738-900a-d19e917f9a37-kube-api-access-l2r2b\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.716677 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-config-data\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.717347 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.718156 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-scripts\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.722306 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") " pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.722863 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd0f7af7-1f3c-4738-900a-d19e917f9a37-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"dd0f7af7-1f3c-4738-900a-d19e917f9a37\") " pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.754516 3552 scope.go:117] "RemoveContainer" containerID="32dcc5ca5dc0d673a0df3d56ea3d97d04e7b45cd0f5fb38962c6086820553fc9" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.781878 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.799288 3552 generic.go:334] "Generic (PLEG): container finished" podID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" containerID="d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36" exitCode=0 Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.799337 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1","Type":"ContainerDied","Data":"d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36"} Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.811956 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-66bd8f794-2dbwx" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.812681 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-66bd8f794-2dbwx" event={"ID":"14da306a-fa6c-460e-af02-1180237b4366","Type":"ContainerDied","Data":"73650634b93f463b262573cce5e93045172c5c898c98d827b34e5e85afb10b93"} Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.820706 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"1137d7d3-a9ff-4002-9b7f-174802428ba7","Type":"ContainerStarted","Data":"3f606a0be6e055bbc90ec57caea80278ab38dcab2e48b7fa02fc466fbdba7ea7"} Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.822077 3552 scope.go:117] "RemoveContainer" containerID="d26206b01dd63cec457fe26724ea78e7afed566a9ce8227dcc7b14f3b347c610" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.831687 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f9cbcd486-gj8tz" event={"ID":"dc2127a5-f3c4-4ae0-8cf3-918fd64c689a","Type":"ContainerDied","Data":"c3015cdf04e3af83985921e5784c19bce490d7d0e0f4ac1dfdb9630d2aac750e"} Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.831772 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f9cbcd486-gj8tz" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.842722 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7558f7559f-rntkm" event={"ID":"b9a18de2-c36e-4cfc-af47-d5143257da26","Type":"ContainerStarted","Data":"ef2a317c1f26c6690c693a461d7cd17cf4f1c38082a3b1979a31a07545a408e7"} Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.842771 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7558f7559f-rntkm" event={"ID":"b9a18de2-c36e-4cfc-af47-d5143257da26","Type":"ContainerStarted","Data":"5a85e6a793dc978420cd470e5ddbd37e72eb24ba78e70faba5ee6ca8b6a6c700"} Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.863122 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.895252 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.895682 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.991616245 podStartE2EDuration="18.895608891s" podCreationTimestamp="2026-03-20 15:49:04 +0000 UTC" firstStartedPulling="2026-03-20 15:49:06.200643569 +0000 UTC m=+1445.894340399" lastFinishedPulling="2026-03-20 15:49:21.104636215 +0000 UTC m=+1460.798333045" observedRunningTime="2026-03-20 15:49:22.874650771 +0000 UTC m=+1462.568347611" watchObservedRunningTime="2026-03-20 15:49:22.895608891 +0000 UTC m=+1462.589305721" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.902450 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.909617 3552 scope.go:117] "RemoveContainer" containerID="8379b831372e299be8e32262ac2e629065764cd1411971206a4142af3fbfab45" Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.923739 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-66bd8f794-2dbwx"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.931716 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-66bd8f794-2dbwx"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.969033 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6f9cbcd486-gj8tz"] Mar 20 15:49:22 crc kubenswrapper[3552]: I0320 15:49:22.984318 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6f9cbcd486-gj8tz"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.000235 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-config-data\") pod \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.000341 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-combined-ca-bundle\") pod \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.000426 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-logs\") pod \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.000496 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-custom-prometheus-ca\") pod \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.000527 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfhxb\" (UniqueName: \"kubernetes.io/projected/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-kube-api-access-cfhxb\") pod \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\" (UID: \"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1\") " Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.001014 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-logs" (OuterVolumeSpecName: "logs") pod "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" (UID: "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.032417 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-kube-api-access-cfhxb" (OuterVolumeSpecName: "kube-api-access-cfhxb") pod "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" (UID: "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1"). InnerVolumeSpecName "kube-api-access-cfhxb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.032554 3552 scope.go:117] "RemoveContainer" containerID="fec1b8e4712d24f429fa20c1b0ca8684d317f2d6ca8e30590c3f2c10f989a2db" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.091654 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" (UID: "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.099580 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" (UID: "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.102305 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.102338 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.102350 3552 reconciler_common.go:300] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.102362 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-cfhxb\" (UniqueName: \"kubernetes.io/projected/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-kube-api-access-cfhxb\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:23 crc kubenswrapper[3552]: W0320 15:49:23.132632 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod444a7139_f192_4a10_8047_c84d83d05dab.slice/crio-f9c9525dd4729571ad5bfd62efe687f441eed29fb7ab6dc4ed47ccd068cd5e7f WatchSource:0}: Error finding container f9c9525dd4729571ad5bfd62efe687f441eed29fb7ab6dc4ed47ccd068cd5e7f: Status 404 returned error can't find the container with id f9c9525dd4729571ad5bfd62efe687f441eed29fb7ab6dc4ed47ccd068cd5e7f Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.150203 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-config-data" (OuterVolumeSpecName: "config-data") pod "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" (UID: "82f2b93b-606f-4a25-a2c8-0d8c3caf69c1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.157984 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-746656bf7-ktbs6"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.210269 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.331959 3552 scope.go:117] "RemoveContainer" containerID="b756336336337216255047984af6d67759643f9b8364310287b066f2d973badc" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.408955 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.446618 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14da306a-fa6c-460e-af02-1180237b4366" path="/var/lib/kubelet/pods/14da306a-fa6c-460e-af02-1180237b4366/volumes" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.447631 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20f75298-10c5-4400-870b-472a9f870f0b" path="/var/lib/kubelet/pods/20f75298-10c5-4400-870b-472a9f870f0b/volumes" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.448166 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad03da73-fd35-414a-b640-6dc7bcc40c24" path="/var/lib/kubelet/pods/ad03da73-fd35-414a-b640-6dc7bcc40c24/volumes" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.450218 3552 scope.go:117] "RemoveContainer" containerID="87ed38b7065d811248c493011799a72fd06e1ec3280d05d12fd19718f0afe37d" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.458232 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc2127a5-f3c4-4ae0-8cf3-918fd64c689a" path="/var/lib/kubelet/pods/dc2127a5-f3c4-4ae0-8cf3-918fd64c689a/volumes" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.458841 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb9e0409-75f5-47bf-a85f-27c8ed63faec" path="/var/lib/kubelet/pods/eb9e0409-75f5-47bf-a85f-27c8ed63faec/volumes" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.572239 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:49:23 crc kubenswrapper[3552]: W0320 15:49:23.582777 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ba0e60f_3d3c_49d7_b574_d96d2d3d5ecc.slice/crio-2708e9552dc8b382af3b027d2eb7d6166ff2ab4a2a46597ceb0affe0cba50062 WatchSource:0}: Error finding container 2708e9552dc8b382af3b027d2eb7d6166ff2ab4a2a46597ceb0affe0cba50062: Status 404 returned error can't find the container with id 2708e9552dc8b382af3b027d2eb7d6166ff2ab4a2a46597ceb0affe0cba50062 Mar 20 15:49:23 crc kubenswrapper[3552]: W0320 15:49:23.591881 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd0f7af7_1f3c_4738_900a_d19e917f9a37.slice/crio-768652d0d46cea55e1d04702fc4a71695c76f4d95b4a02688696385a613af7ca WatchSource:0}: Error finding container 768652d0d46cea55e1d04702fc4a71695c76f4d95b4a02688696385a613af7ca: Status 404 returned error can't find the container with id 768652d0d46cea55e1d04702fc4a71695c76f4d95b4a02688696385a613af7ca Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.597791 3552 kubelet.go:2436] 
"SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.870941 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7558f7559f-rntkm" event={"ID":"b9a18de2-c36e-4cfc-af47-d5143257da26","Type":"ContainerStarted","Data":"dc8cd36d414124d6c4c1afd824378718cfcb4f80b261e6b49988623a0802c091"} Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.875250 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"82f2b93b-606f-4a25-a2c8-0d8c3caf69c1","Type":"ContainerDied","Data":"d1b0126f16122e2e91fed6fbd4282f3807557eaad107a59ffab1bd18b451ce86"} Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.875324 3552 scope.go:117] "RemoveContainer" containerID="d0623a62d476c43f73bed98b9434b45bcf7986bff50f9c1946dba42e2e660e36" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.875262 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.879220 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-746656bf7-ktbs6" event={"ID":"444a7139-f192-4a10-8047-c84d83d05dab","Type":"ContainerStarted","Data":"f9c9525dd4729571ad5bfd62efe687f441eed29fb7ab6dc4ed47ccd068cd5e7f"} Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.881838 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerStarted","Data":"2708e9552dc8b382af3b027d2eb7d6166ff2ab4a2a46597ceb0affe0cba50062"} Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.884793 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"dd0f7af7-1f3c-4738-900a-d19e917f9a37","Type":"ContainerStarted","Data":"768652d0d46cea55e1d04702fc4a71695c76f4d95b4a02688696385a613af7ca"} Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.899021 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-7558f7559f-rntkm" podStartSLOduration=10.8989721 podStartE2EDuration="10.8989721s" podCreationTimestamp="2026-03-20 15:49:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:23.887128724 +0000 UTC m=+1463.580825574" watchObservedRunningTime="2026-03-20 15:49:23.8989721 +0000 UTC m=+1463.592668930" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.939261 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.951238 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.965881 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.966072 3552 topology_manager.go:215] "Topology Admit Handler" podUID="598e089a-dfab-47cc-89bc-f70192f43beb" podNamespace="openstack" podName="watcher-decision-engine-0" Mar 20 15:49:23 crc kubenswrapper[3552]: E0320 15:49:23.966325 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" containerName="watcher-decision-engine" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.966342 3552 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" containerName="watcher-decision-engine" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.966579 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" containerName="watcher-decision-engine" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.967293 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.972051 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Mar 20 15:49:23 crc kubenswrapper[3552]: I0320 15:49:23.978291 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.035815 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.036216 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-config-data\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.036266 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82cv9\" (UniqueName: \"kubernetes.io/projected/598e089a-dfab-47cc-89bc-f70192f43beb-kube-api-access-82cv9\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.036307 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/598e089a-dfab-47cc-89bc-f70192f43beb-logs\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.036326 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.138089 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.138216 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-config-data\") pod \"watcher-decision-engine-0\" (UID: 
\"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.138459 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-82cv9\" (UniqueName: \"kubernetes.io/projected/598e089a-dfab-47cc-89bc-f70192f43beb-kube-api-access-82cv9\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.138537 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.138565 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/598e089a-dfab-47cc-89bc-f70192f43beb-logs\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.153211 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/598e089a-dfab-47cc-89bc-f70192f43beb-logs\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.169734 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.170737 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.176269 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-82cv9\" (UniqueName: \"kubernetes.io/projected/598e089a-dfab-47cc-89bc-f70192f43beb-kube-api-access-82cv9\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.176347 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/598e089a-dfab-47cc-89bc-f70192f43beb-config-data\") pod \"watcher-decision-engine-0\" (UID: \"598e089a-dfab-47cc-89bc-f70192f43beb\") " pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.335237 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.893354 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-746656bf7-ktbs6" event={"ID":"444a7139-f192-4a10-8047-c84d83d05dab","Type":"ContainerStarted","Data":"8d9a7c3cd184d5c5d151e7219da2c8db9fc4a3efeb979f393445b78975950559"} Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.893848 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7558f7559f-rntkm" Mar 20 15:49:24 crc kubenswrapper[3552]: I0320 15:49:24.947191 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Mar 20 15:49:25 crc kubenswrapper[3552]: I0320 15:49:25.442942 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82f2b93b-606f-4a25-a2c8-0d8c3caf69c1" path="/var/lib/kubelet/pods/82f2b93b-606f-4a25-a2c8-0d8c3caf69c1/volumes" Mar 20 15:49:25 crc kubenswrapper[3552]: I0320 15:49:25.901978 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"598e089a-dfab-47cc-89bc-f70192f43beb","Type":"ContainerStarted","Data":"67a6e29703abdfa958184bd5e2f433aee02b82da7bbed82d16af424822d590cf"} Mar 20 15:49:25 crc kubenswrapper[3552]: I0320 15:49:25.904221 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerStarted","Data":"6b06961b87fff350f8e1e811a7d6226de2eabef4054309c44e264665ca0185e7"} Mar 20 15:49:25 crc kubenswrapper[3552]: I0320 15:49:25.905870 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"dd0f7af7-1f3c-4738-900a-d19e917f9a37","Type":"ContainerStarted","Data":"f32f12bd46a9aeac0520600b5898a10c69b3e442baec752afcd11c6e2359be2d"} Mar 20 15:49:26 crc kubenswrapper[3552]: I0320 15:49:26.913424 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"598e089a-dfab-47cc-89bc-f70192f43beb","Type":"ContainerStarted","Data":"d25d02e4eff85427525f9f292d73b812f9ce0f7b6ce5dd9f3a6321183626e4b4"} Mar 20 15:49:26 crc kubenswrapper[3552]: I0320 15:49:26.915804 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-746656bf7-ktbs6" event={"ID":"444a7139-f192-4a10-8047-c84d83d05dab","Type":"ContainerStarted","Data":"c73bcb6c829d1bdef860751a52b70aab984cd17d3d02600b84465c54c4c402a2"} Mar 20 15:49:26 crc kubenswrapper[3552]: I0320 15:49:26.954159 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=3.954107331 podStartE2EDuration="3.954107331s" podCreationTimestamp="2026-03-20 15:49:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:26.930106659 +0000 UTC m=+1466.623803489" watchObservedRunningTime="2026-03-20 15:49:26.954107331 +0000 UTC m=+1466.647804161" Mar 20 15:49:26 crc kubenswrapper[3552]: I0320 15:49:26.955602 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/swift-proxy-746656bf7-ktbs6" podStartSLOduration=9.95557008 podStartE2EDuration="9.95557008s" podCreationTimestamp="2026-03-20 15:49:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:26.952659622 +0000 UTC 
m=+1466.646356472" watchObservedRunningTime="2026-03-20 15:49:26.95557008 +0000 UTC m=+1466.649266920" Mar 20 15:49:27 crc kubenswrapper[3552]: I0320 15:49:27.887507 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:27 crc kubenswrapper[3552]: I0320 15:49:27.887839 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:28 crc kubenswrapper[3552]: I0320 15:49:28.930949 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerStarted","Data":"0fe99c1c947582875d271ee8ae566c6b31bbc2af1845f45ae0ba9f8342013256"} Mar 20 15:49:28 crc kubenswrapper[3552]: I0320 15:49:28.932833 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"dd0f7af7-1f3c-4738-900a-d19e917f9a37","Type":"ContainerStarted","Data":"29067d192c8fc2afb9d46e22f7ab785274885dc4a237ca79b1ec1a9875ae0d28"} Mar 20 15:49:29 crc kubenswrapper[3552]: I0320 15:49:29.326741 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.181:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:49:29 crc kubenswrapper[3552]: I0320 15:49:29.895140 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-746656bf7-ktbs6" podUID="444a7139-f192-4a10-8047-c84d83d05dab" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Mar 20 15:49:30 crc kubenswrapper[3552]: I0320 15:49:29.971504 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.971464632 podStartE2EDuration="7.971464632s" podCreationTimestamp="2026-03-20 15:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:29.966261753 +0000 UTC m=+1469.659958593" watchObservedRunningTime="2026-03-20 15:49:29.971464632 +0000 UTC m=+1469.665161462" Mar 20 15:49:30 crc kubenswrapper[3552]: I0320 15:49:30.948639 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerStarted","Data":"5152369b254db431d4376432fbc82d909f50a86617cdb0aa4336783c2c4b3827"} Mar 20 15:49:32 crc kubenswrapper[3552]: I0320 15:49:32.782829 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Mar 20 15:49:32 crc kubenswrapper[3552]: I0320 15:49:32.980262 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerStarted","Data":"41d47c61ea3bd3af09d328d7d913cad23f1562e990b1bd784bfd52b9d623c1cf"} Mar 20 15:49:33 crc kubenswrapper[3552]: I0320 15:49:33.104780 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:33 crc kubenswrapper[3552]: I0320 15:49:33.159912 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-746656bf7-ktbs6" Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.007351 3552 kuberuntime_container.go:770] "Killing container 
with a grace period" pod="openstack/ceilometer-0" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-central-agent" containerID="cri-o://6b06961b87fff350f8e1e811a7d6226de2eabef4054309c44e264665ca0185e7" gracePeriod=30 Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.007756 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="proxy-httpd" containerID="cri-o://41d47c61ea3bd3af09d328d7d913cad23f1562e990b1bd784bfd52b9d623c1cf" gracePeriod=30 Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.007794 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="sg-core" containerID="cri-o://5152369b254db431d4376432fbc82d909f50a86617cdb0aa4336783c2c4b3827" gracePeriod=30 Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.007831 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-notification-agent" containerID="cri-o://0fe99c1c947582875d271ee8ae566c6b31bbc2af1845f45ae0ba9f8342013256" gracePeriod=30 Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.007828 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.039763 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.08210998 podStartE2EDuration="12.039713473s" podCreationTimestamp="2026-03-20 15:49:22 +0000 UTC" firstStartedPulling="2026-03-20 15:49:23.592025721 +0000 UTC m=+1463.285722551" lastFinishedPulling="2026-03-20 15:49:31.549629194 +0000 UTC m=+1471.243326044" observedRunningTime="2026-03-20 15:49:34.033796865 +0000 UTC m=+1473.727493705" watchObservedRunningTime="2026-03-20 15:49:34.039713473 +0000 UTC m=+1473.733410293" Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.212280 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.181:8776/healthcheck\": read tcp 10.217.0.2:56788->10.217.0.181:8776: read: connection reset by peer" Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.335926 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Mar 20 15:49:34 crc kubenswrapper[3552]: I0320 15:49:34.401538 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020120 3552 generic.go:334] "Generic (PLEG): container finished" podID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerID="41d47c61ea3bd3af09d328d7d913cad23f1562e990b1bd784bfd52b9d623c1cf" exitCode=0 Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020152 3552 generic.go:334] "Generic (PLEG): container finished" podID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerID="5152369b254db431d4376432fbc82d909f50a86617cdb0aa4336783c2c4b3827" exitCode=2 Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020163 3552 generic.go:334] "Generic (PLEG): container finished" podID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerID="0fe99c1c947582875d271ee8ae566c6b31bbc2af1845f45ae0ba9f8342013256" 
Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020172 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerDied","Data":"41d47c61ea3bd3af09d328d7d913cad23f1562e990b1bd784bfd52b9d623c1cf"}
Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020225 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerDied","Data":"5152369b254db431d4376432fbc82d909f50a86617cdb0aa4336783c2c4b3827"}
Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020247 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerDied","Data":"0fe99c1c947582875d271ee8ae566c6b31bbc2af1845f45ae0ba9f8342013256"}
Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.020269 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Mar 20 15:49:35 crc kubenswrapper[3552]: I0320 15:49:35.083818 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Mar 20 15:49:37 crc kubenswrapper[3552]: I0320 15:49:37.824665 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="dd0f7af7-1f3c-4738-900a-d19e917f9a37" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.187:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:49:38 crc kubenswrapper[3552]: I0320 15:49:38.659650 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Mar 20 15:49:38 crc kubenswrapper[3552]: I0320 15:49:38.726933 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:49:38 crc kubenswrapper[3552]: I0320 15:49:38.737642 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6c5fc7598d-gk2l6"
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.054762 3552 generic.go:334] "Generic (PLEG): container finished" podID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerID="fa7d1fdea4c505d6227d252bd086fcdefa3963ac1c01396e743a4ac49b423e7a" exitCode=0
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.054959 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerDied","Data":"fa7d1fdea4c505d6227d252bd086fcdefa3963ac1c01396e743a4ac49b423e7a"}
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.057241 3552 generic.go:334] "Generic (PLEG): container finished" podID="dc4c2a50-03a3-449c-a797-9f582a546642" containerID="8f5c9b67b5f5b8e7408be1dd5ce4379ec13dcd233c3c719e9c200b2a858b8642" exitCode=137
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.057281 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dc4c2a50-03a3-449c-a797-9f582a546642","Type":"ContainerDied","Data":"8f5c9b67b5f5b8e7408be1dd5ce4379ec13dcd233c3c719e9c200b2a858b8642"}
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.057303 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dc4c2a50-03a3-449c-a797-9f582a546642","Type":"ContainerDied","Data":"470bc527668e30261663928b259f44b0d2dfe0e26e6889b891db1d37852ba3f5"}
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.057313 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="470bc527668e30261663928b259f44b0d2dfe0e26e6889b891db1d37852ba3f5"
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.117227 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.237817 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.237980 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-scripts\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.238016 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f87b8\" (UniqueName: \"kubernetes.io/projected/dc4c2a50-03a3-449c-a797-9f582a546642-kube-api-access-f87b8\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.238048 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dc4c2a50-03a3-449c-a797-9f582a546642-etc-machine-id\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.238146 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data-custom\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.238232 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-combined-ca-bundle\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.238258 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc4c2a50-03a3-449c-a797-9f582a546642-logs\") pod \"dc4c2a50-03a3-449c-a797-9f582a546642\" (UID: \"dc4c2a50-03a3-449c-a797-9f582a546642\") "
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.239196 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc4c2a50-03a3-449c-a797-9f582a546642-logs" (OuterVolumeSpecName: "logs") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.239648 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc4c2a50-03a3-449c-a797-9f582a546642-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.253636 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc4c2a50-03a3-449c-a797-9f582a546642-kube-api-access-f87b8" (OuterVolumeSpecName: "kube-api-access-f87b8") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "kube-api-access-f87b8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.256571 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-scripts" (OuterVolumeSpecName: "scripts") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.258540 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.334389 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data" (OuterVolumeSpecName: "config-data") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.340564 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc4c2a50-03a3-449c-a797-9f582a546642-logs\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.340599 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.340610 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.340621 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-f87b8\" (UniqueName: \"kubernetes.io/projected/dc4c2a50-03a3-449c-a797-9f582a546642-kube-api-access-f87b8\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.340631 3552 reconciler_common.go:300] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dc4c2a50-03a3-449c-a797-9f582a546642-etc-machine-id\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.340640 3552 reconciler_common.go:300] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-config-data-custom\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.347581 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc4c2a50-03a3-449c-a797-9f582a546642" (UID: "dc4c2a50-03a3-449c-a797-9f582a546642"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:39 crc kubenswrapper[3552]: I0320 15:49:39.442072 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc4c2a50-03a3-449c-a797-9f582a546642-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.072538 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Need to start a new one" pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.072681 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerStarted","Data":"f4174c6f20afe7a5e89a0ed16176254d9f455ae0b806478a25ff6eb301e03ec6"} Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.093343 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2bkk6" podStartSLOduration=6.610028997 podStartE2EDuration="46.09328935s" podCreationTimestamp="2026-03-20 15:48:54 +0000 UTC" firstStartedPulling="2026-03-20 15:48:59.883680237 +0000 UTC m=+1439.577377067" lastFinishedPulling="2026-03-20 15:49:39.36694059 +0000 UTC m=+1479.060637420" observedRunningTime="2026-03-20 15:49:40.088114522 +0000 UTC m=+1479.781811362" watchObservedRunningTime="2026-03-20 15:49:40.09328935 +0000 UTC m=+1479.786986180" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.126137 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.133766 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.144317 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.144509 3552 topology_manager.go:215] "Topology Admit Handler" podUID="2a2a764d-837e-455a-9404-b306cc90147d" podNamespace="openstack" podName="cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: E0320 15:49:40.144761 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.144776 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api" Mar 20 15:49:40 crc kubenswrapper[3552]: E0320 15:49:40.144796 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api-log" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.144803 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api-log" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.145008 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api-log" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.145023 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" containerName="cinder-api" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.146017 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.148534 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.148536 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.152195 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.152420 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261104 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-scripts\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261279 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261312 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-config-data\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261391 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261436 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-config-data-custom\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261476 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbmfr\" (UniqueName: \"kubernetes.io/projected/2a2a764d-837e-455a-9404-b306cc90147d-kube-api-access-vbmfr\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261540 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a2a764d-837e-455a-9404-b306cc90147d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261574 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.261597 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a2a764d-837e-455a-9404-b306cc90147d-logs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363601 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363672 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-config-data\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363718 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363747 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-config-data-custom\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363776 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-vbmfr\" (UniqueName: \"kubernetes.io/projected/2a2a764d-837e-455a-9404-b306cc90147d-kube-api-access-vbmfr\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363811 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a2a764d-837e-455a-9404-b306cc90147d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363849 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363874 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a2a764d-837e-455a-9404-b306cc90147d-logs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.363960 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-scripts\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.369917 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a2a764d-837e-455a-9404-b306cc90147d-logs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.377480 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a2a764d-837e-455a-9404-b306cc90147d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.379278 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.379713 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.380563 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-config-data\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.382028 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-scripts\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.388986 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.399673 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a2a764d-837e-455a-9404-b306cc90147d-config-data-custom\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.405600 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbmfr\" (UniqueName: \"kubernetes.io/projected/2a2a764d-837e-455a-9404-b306cc90147d-kube-api-access-vbmfr\") pod \"cinder-api-0\" (UID: \"2a2a764d-837e-455a-9404-b306cc90147d\") " pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.464955 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.886330 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5dp9z"] Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.886921 3552 topology_manager.go:215] "Topology Admit Handler" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" podNamespace="openshift-marketplace" podName="community-operators-5dp9z" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.890899 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.897881 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5dp9z"] Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.976241 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-utilities\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.976449 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9dpg\" (UniqueName: \"kubernetes.io/projected/eb12b794-eb41-45ba-9acf-0c42cada176c-kube-api-access-r9dpg\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:40 crc kubenswrapper[3552]: I0320 15:49:40.976514 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-catalog-content\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.011249 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.079457 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-utilities\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.079581 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r9dpg\" (UniqueName: \"kubernetes.io/projected/eb12b794-eb41-45ba-9acf-0c42cada176c-kube-api-access-r9dpg\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.079622 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-catalog-content\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.080686 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-utilities\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.080699 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-catalog-content\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.088066 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2a2a764d-837e-455a-9404-b306cc90147d","Type":"ContainerStarted","Data":"c44f73ace336b5dda317b908f95f4503acda87f4e859baa88be3f77f4113e89a"} Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.106455 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9dpg\" (UniqueName: \"kubernetes.io/projected/eb12b794-eb41-45ba-9acf-0c42cada176c-kube-api-access-r9dpg\") pod \"community-operators-5dp9z\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.214187 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.462092 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc4c2a50-03a3-449c-a797-9f582a546642" path="/var/lib/kubelet/pods/dc4c2a50-03a3-449c-a797-9f582a546642/volumes" Mar 20 15:49:41 crc kubenswrapper[3552]: I0320 15:49:41.827517 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5dp9z"] Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.120045 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2a2a764d-837e-455a-9404-b306cc90147d","Type":"ContainerStarted","Data":"0afd9a77bed0df22fb08ab5d1a798e12ec86aee57a0812a09bc4329856b88948"} Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.157114 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerStarted","Data":"1be282b2427dcc016b84a629dd562d6bb2360aca6cd32a5794745dbf2956e021"} Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.528130 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.528529 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-log" containerID="cri-o://8644ecfa8b1ecf6234deef254f61f78ad7a80a11d4e2ae93140b9aa6c981f7bc" gracePeriod=30 Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.528945 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-httpd" containerID="cri-o://92375a2be83b6898fc8603064f22745660193225a002ac5103aa08281c6f908f" gracePeriod=30 Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.779248 3552 patch_prober.go:28] interesting 
pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.779604 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.779647 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.781517 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3e2f09fb5251918a00eedeadde2f6289e4a42a00c71de21ae5afa976b5070f51"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 15:49:42 crc kubenswrapper[3552]: I0320 15:49:42.781752 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://3e2f09fb5251918a00eedeadde2f6289e4a42a00c71de21ae5afa976b5070f51" gracePeriod=600 Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.196137 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2a2a764d-837e-455a-9404-b306cc90147d","Type":"ContainerStarted","Data":"42bef2fd44907d940798ceed0a2a5b4ac382a9959f6185bc4d2707b23b475eda"} Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.197858 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.205523 3552 generic.go:334] "Generic (PLEG): container finished" podID="0d151a0e-e371-44af-a237-0d70a5876ace" containerID="8644ecfa8b1ecf6234deef254f61f78ad7a80a11d4e2ae93140b9aa6c981f7bc" exitCode=143 Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.205804 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0d151a0e-e371-44af-a237-0d70a5876ace","Type":"ContainerDied","Data":"8644ecfa8b1ecf6234deef254f61f78ad7a80a11d4e2ae93140b9aa6c981f7bc"} Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.223323 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.22327306 podStartE2EDuration="3.22327306s" podCreationTimestamp="2026-03-20 15:49:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:43.220371703 +0000 UTC m=+1482.914068553" watchObservedRunningTime="2026-03-20 15:49:43.22327306 +0000 UTC m=+1482.916969890" Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.226308 3552 generic.go:334] "Generic (PLEG): container finished" podID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerID="6b06961b87fff350f8e1e811a7d6226de2eabef4054309c44e264665ca0185e7" exitCode=0 Mar 20 15:49:43 crc 
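The machine-config-daemon sequence above is a liveness-driven restart: connection refused on 127.0.0.1:8798 fails the probe, the kubelet records "failed liveness probe, will be restarted", kills the container with gracePeriod=600, and (a few entries below) starts a replacement. The exit codes seen in this log follow the usual container convention, sketched here:

    // exitcode.go - a sketch decoding the exit codes that appear in this log:
    // 0 = clean exit, small nonzero = application error, 128+N = killed by signal N.
    package main

    import "fmt"

    func describe(code int) string {
    	switch {
    	case code == 0:
    		return "clean exit"
    	case code > 128:
    		return fmt.Sprintf("killed by signal %d (137 = SIGKILL, 143 = SIGTERM)", code-128)
    	default:
    		return "application error exit"
    	}
    }

    func main() {
    	for _, c := range []int{0, 2, 137, 143} { // all four occur in the entries above
    		fmt.Printf("exitCode=%d: %s\n", c, describe(c))
    	}
    }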
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.226338 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerDied","Data":"6b06961b87fff350f8e1e811a7d6226de2eabef4054309c44e264665ca0185e7"}
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.229938 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="3e2f09fb5251918a00eedeadde2f6289e4a42a00c71de21ae5afa976b5070f51" exitCode=0
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.230107 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"3e2f09fb5251918a00eedeadde2f6289e4a42a00c71de21ae5afa976b5070f51"}
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.230218 3552 scope.go:117] "RemoveContainer" containerID="7f985d384e5c2566938c7d880ef875555c77af84a50df3c9a9abd4000ed8661c"
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.239976 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerID="5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257" exitCode=0
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.240018 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerDied","Data":"5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257"}
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.389880 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549179 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-config-data\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549367 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mv9cv\" (UniqueName: \"kubernetes.io/projected/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-kube-api-access-mv9cv\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549482 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-log-httpd\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549521 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-scripts\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549578 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-combined-ca-bundle\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549638 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-run-httpd\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549694 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-sg-core-conf-yaml\") pod \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\" (UID: \"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc\") "
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.549982 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.550218 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-run-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.550280 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.557579 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-kube-api-access-mv9cv" (OuterVolumeSpecName: "kube-api-access-mv9cv") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "kube-api-access-mv9cv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.559063 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-scripts" (OuterVolumeSpecName: "scripts") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.589512 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.653383 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.653445 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mv9cv\" (UniqueName: \"kubernetes.io/projected/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-kube-api-access-mv9cv\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.653462 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-log-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.653477 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.662652 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.709671 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-config-data" (OuterVolumeSpecName: "config-data") pod "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" (UID: "2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.754974 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.755023 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.899216 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-7558f7559f-rntkm" podUID="b9a18de2-c36e-4cfc-af47-d5143257da26" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.904863 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-7558f7559f-rntkm" podUID="b9a18de2-c36e-4cfc-af47-d5143257da26" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Mar 20 15:49:43 crc kubenswrapper[3552]: I0320 15:49:43.905454 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-7558f7559f-rntkm" podUID="b9a18de2-c36e-4cfc-af47-d5143257da26" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.249032 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc","Type":"ContainerDied","Data":"2708e9552dc8b382af3b027d2eb7d6166ff2ab4a2a46597ceb0affe0cba50062"}
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.249340 3552 scope.go:117] "RemoveContainer" containerID="41d47c61ea3bd3af09d328d7d913cad23f1562e990b1bd784bfd52b9d623c1cf"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.249456 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.271872 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"}
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.280534 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerStarted","Data":"1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87"}
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.311620 3552 scope.go:117] "RemoveContainer" containerID="5152369b254db431d4376432fbc82d909f50a86617cdb0aa4336783c2c4b3827"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.329690 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.356419 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.384020 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.384304 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0546ebe6-d55c-4542-b70d-e04125549482" podNamespace="openstack" podName="ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: E0320 15:49:44.394390 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="sg-core"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.394458 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="sg-core"
Mar 20 15:49:44 crc kubenswrapper[3552]: E0320 15:49:44.394688 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-central-agent"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.394700 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-central-agent"
Mar 20 15:49:44 crc kubenswrapper[3552]: E0320 15:49:44.394747 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="proxy-httpd"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.394755 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="proxy-httpd"
Mar 20 15:49:44 crc kubenswrapper[3552]: E0320 15:49:44.394792 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-notification-agent"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.394801 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-notification-agent"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.407375 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-central-agent"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.407567 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="proxy-httpd"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.407599 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="sg-core"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.407641 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" containerName="ceilometer-notification-agent"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.423102 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.426955 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.428860 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.472867 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-config-data\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.472977 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.473053 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.473100 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjk4h\" (UniqueName: \"kubernetes.io/projected/0546ebe6-d55c-4542-b70d-e04125549482-kube-api-access-pjk4h\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.473137 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-scripts\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.473224 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-run-httpd\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.473288 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-log-httpd\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.490645 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575037 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-run-httpd\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575121 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-log-httpd\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575164 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-config-data\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575232 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575277 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575302 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pjk4h\" (UniqueName: \"kubernetes.io/projected/0546ebe6-d55c-4542-b70d-e04125549482-kube-api-access-pjk4h\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.575334 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-scripts\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.576614 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.577271 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-log-httpd\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.577625 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-run-httpd\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0"
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-config-data\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.582978 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.583551 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.592074 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-scripts\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0" Mar 20 15:49:44 crc kubenswrapper[3552]: E0320 15:49:44.599496 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-pjk4h], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="0546ebe6-d55c-4542-b70d-e04125549482" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.604253 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjk4h\" (UniqueName: \"kubernetes.io/projected/0546ebe6-d55c-4542-b70d-e04125549482-kube-api-access-pjk4h\") pod \"ceilometer-0\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") " pod="openstack/ceilometer-0" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.605882 3552 scope.go:117] "RemoveContainer" containerID="0fe99c1c947582875d271ee8ae566c6b31bbc2af1845f45ae0ba9f8342013256" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.638044 3552 scope.go:117] "RemoveContainer" containerID="6b06961b87fff350f8e1e811a7d6226de2eabef4054309c44e264665ca0185e7" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.950028 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:49:44 crc kubenswrapper[3552]: I0320 15:49:44.950075 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.293249 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.309667 3552 util.go:30] "No sandbox for pod can be found. 
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388051 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjk4h\" (UniqueName: \"kubernetes.io/projected/0546ebe6-d55c-4542-b70d-e04125549482-kube-api-access-pjk4h\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388200 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-log-httpd\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388266 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-combined-ca-bundle\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388319 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-scripts\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388496 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-config-data\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388549 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-sg-core-conf-yaml\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388610 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-run-httpd\") pod \"0546ebe6-d55c-4542-b70d-e04125549482\" (UID: \"0546ebe6-d55c-4542-b70d-e04125549482\") "
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.388823 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.389160 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-log-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.389320 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.392616 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-config-data" (OuterVolumeSpecName: "config-data") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.395071 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0546ebe6-d55c-4542-b70d-e04125549482-kube-api-access-pjk4h" (OuterVolumeSpecName: "kube-api-access-pjk4h") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "kube-api-access-pjk4h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.395329 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-scripts" (OuterVolumeSpecName: "scripts") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.397907 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.398035 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0546ebe6-d55c-4542-b70d-e04125549482" (UID: "0546ebe6-d55c-4542-b70d-e04125549482"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.442610 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc" path="/var/lib/kubelet/pods/2ba0e60f-3d3c-49d7-b574-d96d2d3d5ecc/volumes"
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.490972 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.491002 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.491020 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0546ebe6-d55c-4542-b70d-e04125549482-run-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.491035 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-pjk4h\" (UniqueName: \"kubernetes.io/projected/0546ebe6-d55c-4542-b70d-e04125549482-kube-api-access-pjk4h\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.491049 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:45 crc kubenswrapper[3552]: I0320 15:49:45.491061 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0546ebe6-d55c-4542-b70d-e04125549482-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:46 crc kubenswrapper[3552]: I0320 15:49:46.301747 3552 generic.go:334] "Generic (PLEG): container finished" podID="0d151a0e-e371-44af-a237-0d70a5876ace" containerID="92375a2be83b6898fc8603064f22745660193225a002ac5103aa08281c6f908f" exitCode=0
Mar 20 15:49:46 crc kubenswrapper[3552]: I0320 15:49:46.301811 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:46 crc kubenswrapper[3552]: I0320 15:49:46.301890 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0d151a0e-e371-44af-a237-0d70a5876ace","Type":"ContainerDied","Data":"92375a2be83b6898fc8603064f22745660193225a002ac5103aa08281c6f908f"}
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.341918 3552 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.342058 3552 patch_prober.go:28] interesting pod/openshift-config-operator-77658b5b66-dq5sc container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.343051 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.342930 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-77658b5b66-dq5sc" podUID="530553aa-0a1d-423e-8a22-f5eb4bdbb883" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.678372 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2bkk6" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" probeResult="failure" output=<
Mar 20 15:49:47 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s
Mar 20 15:49:47 crc kubenswrapper[3552]: >
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.803272 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.823332 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.828760 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.828946 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" podNamespace="openstack" podName="ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.832795 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.835683 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.835882 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.839944 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.975482 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-config-data\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.975539 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-run-httpd\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.975560 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-scripts\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.975900 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-log-httpd\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.975997 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.976038 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qblcl\" (UniqueName: \"kubernetes.io/projected/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-kube-api-access-qblcl\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:47 crc kubenswrapper[3552]: I0320 15:49:47.976158 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.077476 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-config-data\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.077586 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-run-httpd\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.077691 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-scripts\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.078222 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-run-httpd\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.078385 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-log-httpd\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.078707 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-log-httpd\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.078862 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.079215 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qblcl\" (UniqueName: \"kubernetes.io/projected/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-kube-api-access-qblcl\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.079622 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.089943 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.090725 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.098940 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-config-data\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.099667 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-scripts\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.102963 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-qblcl\" (UniqueName: \"kubernetes.io/projected/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-kube-api-access-qblcl\") pod \"ceilometer-0\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.162774 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.306865 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.362088 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0d151a0e-e371-44af-a237-0d70a5876ace","Type":"ContainerDied","Data":"cc7c42c207d50a83c05b50f9504c0a6185c6727daee4cbdd5c6f2c5e5f6c1ef7"}
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.362424 3552 scope.go:117] "RemoveContainer" containerID="92375a2be83b6898fc8603064f22745660193225a002ac5103aa08281c6f908f"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.362222 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388385 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-internal-tls-certs\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388504 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-httpd-run\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388575 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6bhq\" (UniqueName: \"kubernetes.io/projected/0d151a0e-e371-44af-a237-0d70a5876ace-kube-api-access-l6bhq\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388612 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-config-data\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388691 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-logs\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388748 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388848 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-scripts\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.388883 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-combined-ca-bundle\") pod \"0d151a0e-e371-44af-a237-0d70a5876ace\" (UID: \"0d151a0e-e371-44af-a237-0d70a5876ace\") "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.395463 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.396260 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-logs" (OuterVolumeSpecName: "logs") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.414609 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-scripts" (OuterVolumeSpecName: "scripts") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.426202 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d151a0e-e371-44af-a237-0d70a5876ace-kube-api-access-l6bhq" (OuterVolumeSpecName: "kube-api-access-l6bhq") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "kube-api-access-l6bhq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.428638 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.495540 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.495582 3552 reconciler_common.go:300] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-httpd-run\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.495594 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-l6bhq\" (UniqueName: \"kubernetes.io/projected/0d151a0e-e371-44af-a237-0d70a5876ace-kube-api-access-l6bhq\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.495605 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d151a0e-e371-44af-a237-0d70a5876ace-logs\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.503268 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.505461 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.548144 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.549161 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.572072 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.608705 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.609238 3552 reconciler_common.go:300] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.609255 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.611438 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-config-data" (OuterVolumeSpecName: "config-data") pod "0d151a0e-e371-44af-a237-0d70a5876ace" (UID: "0d151a0e-e371-44af-a237-0d70a5876ace"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.698288 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.708358 3552 scope.go:117] "RemoveContainer" containerID="8644ecfa8b1ecf6234deef254f61f78ad7a80a11d4e2ae93140b9aa6c981f7bc"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.708885 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.710939 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d151a0e-e371-44af-a237-0d70a5876ace-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.727055 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.727231 3552 topology_manager.go:215] "Topology Admit Handler" podUID="39006f12-301b-4c3d-a0bd-8b19313c4843" podNamespace="openstack" podName="glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: E0320 15:49:48.727500 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-httpd"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.727516 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-httpd"
Mar 20 15:49:48 crc kubenswrapper[3552]: E0320 15:49:48.727553 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-log"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.727562 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-log"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.727733 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-log"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.727756 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" containerName="glance-httpd"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.735158 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.737044 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.746217 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.747315 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914641 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/39006f12-301b-4c3d-a0bd-8b19313c4843-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914684 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39006f12-301b-4c3d-a0bd-8b19313c4843-logs\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914779 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rvtb\" (UniqueName: \"kubernetes.io/projected/39006f12-301b-4c3d-a0bd-8b19313c4843-kube-api-access-4rvtb\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914809 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-scripts\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914828 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914850 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914872 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-config-data\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:48 crc kubenswrapper[3552]: I0320 15:49:48.914900 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.015922 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/39006f12-301b-4c3d-a0bd-8b19313c4843-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.015967 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39006f12-301b-4c3d-a0bd-8b19313c4843-logs\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016062 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-4rvtb\" (UniqueName: \"kubernetes.io/projected/39006f12-301b-4c3d-a0bd-8b19313c4843-kube-api-access-4rvtb\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016106 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-scripts\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016131 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016185 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-config-data\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016213 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.016876 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39006f12-301b-4c3d-a0bd-8b19313c4843-logs\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.017145 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/39006f12-301b-4c3d-a0bd-8b19313c4843-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.017529 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.019892 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.024051 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-scripts\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.025197 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.030195 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39006f12-301b-4c3d-a0bd-8b19313c4843-config-data\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.040098 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rvtb\" (UniqueName: \"kubernetes.io/projected/39006f12-301b-4c3d-a0bd-8b19313c4843-kube-api-access-4rvtb\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.052964 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"39006f12-301b-4c3d-a0bd-8b19313c4843\") " pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.064428 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.380374 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerStarted","Data":"a4a25cfa7a43f59640bb4e6a2cf252ef8cb228cacb4026d2ba7230212c049d4b"}
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.488991 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0546ebe6-d55c-4542-b70d-e04125549482" path="/var/lib/kubelet/pods/0546ebe6-d55c-4542-b70d-e04125549482/volumes"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.690959 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d151a0e-e371-44af-a237-0d70a5876ace" path="/var/lib/kubelet/pods/0d151a0e-e371-44af-a237-0d70a5876ace/volumes"
Mar 20 15:49:49 crc kubenswrapper[3552]: I0320 15:49:49.947572 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Mar 20 15:49:50 crc kubenswrapper[3552]: I0320 15:49:50.400801 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerStarted","Data":"b80b8ec5cad1d28a1dff25182854b1685ab8b85bef41a1962ea0e96efa4e9a0b"}
Mar 20 15:49:50 crc kubenswrapper[3552]: I0320 15:49:50.402457 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"39006f12-301b-4c3d-a0bd-8b19313c4843","Type":"ContainerStarted","Data":"6cc0472a5b8e21cf39085b7eba582f87b15788fd410cc83f07fb026cc5d2adda"}
Mar 20 15:49:51 crc kubenswrapper[3552]: I0320 15:49:51.411574 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerStarted","Data":"63ceba0122d25399c06a7b83eae90ad20bc955b9cff10be60bfddff37c95fcb8"}
Mar 20 15:49:52 crc kubenswrapper[3552]: I0320 15:49:52.419702 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"39006f12-301b-4c3d-a0bd-8b19313c4843","Type":"ContainerStarted","Data":"6eae5bde102e4182b8fefc17e5a72db9993dfd09da20d3593ba7b852e760d1c6"}
Mar 20 15:49:52 crc kubenswrapper[3552]: I0320 15:49:52.421574 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerStarted","Data":"e068bf2ecd023f7795aad15010779be31db2a8425ed634607ca538bcefd071c7"}
Mar 20 15:49:52 crc kubenswrapper[3552]: I0320 15:49:52.521380 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Mar 20 15:49:52 crc kubenswrapper[3552]: I0320 15:49:52.521597 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-log" containerID="cri-o://9b723fd8ce90c7dfda83c7ec544843f705baf74280b9dc2949273e67b902b3de" gracePeriod=30
Mar 20 15:49:52 crc kubenswrapper[3552]: I0320 15:49:52.521985 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-httpd" containerID="cri-o://5b287d58f9489ee09695211f3aaaa8e7ef67ef45fb8f2cc4dbdfeb21aa6999ad" gracePeriod=30
Mar 20 15:49:53 crc kubenswrapper[3552]: I0320 15:49:53.453920 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerStarted","Data":"578dd7c4e2fc1a8366dd179b8237b860edd7836bf608e1b1260886d3f672521f"}
Mar 20 15:49:53 crc kubenswrapper[3552]: I0320 15:49:53.455945 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"39006f12-301b-4c3d-a0bd-8b19313c4843","Type":"ContainerStarted","Data":"ae3cdfc154c9de1388d8ea5335895bb3077bb158f5b1c01f5511802bde67d831"}
Mar 20 15:49:53 crc kubenswrapper[3552]: I0320 15:49:53.459316 3552 generic.go:334] "Generic (PLEG): container finished" podID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerID="9b723fd8ce90c7dfda83c7ec544843f705baf74280b9dc2949273e67b902b3de" exitCode=143
Mar 20 15:49:53 crc kubenswrapper[3552]: I0320 15:49:53.459361 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a4b8a27-5d4a-44d2-8553-124cd38ec665","Type":"ContainerDied","Data":"9b723fd8ce90c7dfda83c7ec544843f705baf74280b9dc2949273e67b902b3de"}
Mar 20 15:49:53 crc kubenswrapper[3552]: I0320 15:49:53.501703 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.501639378 podStartE2EDuration="5.501639378s" podCreationTimestamp="2026-03-20 15:49:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:49:53.491045019 +0000 UTC m=+1493.184741849" watchObservedRunningTime="2026-03-20 15:49:53.501639378 +0000 UTC m=+1493.195336218"
Mar 20 15:49:54 crc kubenswrapper[3552]: I0320 15:49:54.472614 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="2a2a764d-837e-455a-9404-b306cc90147d" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.190:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:49:54 crc kubenswrapper[3552]: I0320 15:49:54.490225 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.335364529 podStartE2EDuration="7.490181486s" podCreationTimestamp="2026-03-20 15:49:47 +0000 UTC" firstStartedPulling="2026-03-20 15:49:48.560520764 +0000 UTC m=+1488.254217594" lastFinishedPulling="2026-03-20 15:49:52.715337721 +0000 UTC m=+1492.409034551" observedRunningTime="2026-03-20 15:49:54.482819909 +0000 UTC m=+1494.176516759" watchObservedRunningTime="2026-03-20 15:49:54.490181486 +0000 UTC m=+1494.183878316"
Mar 20 15:49:55 crc kubenswrapper[3552]: I0320 15:49:55.471843 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="2a2a764d-837e-455a-9404-b306cc90147d" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.190:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.049954 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2bkk6" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" probeResult="failure" output=<
Mar 20 15:49:56 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s
Mar 20 15:49:56 crc kubenswrapper[3552]: >
Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.490563 3552 generic.go:334] "Generic (PLEG): container finished" podID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerID="5b287d58f9489ee09695211f3aaaa8e7ef67ef45fb8f2cc4dbdfeb21aa6999ad" exitCode=0
podID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerID="5b287d58f9489ee09695211f3aaaa8e7ef67ef45fb8f2cc4dbdfeb21aa6999ad" exitCode=0 Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.490759 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a4b8a27-5d4a-44d2-8553-124cd38ec665","Type":"ContainerDied","Data":"5b287d58f9489ee09695211f3aaaa8e7ef67ef45fb8f2cc4dbdfeb21aa6999ad"} Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.911357 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996182 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-httpd-run\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996254 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996294 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhj84\" (UniqueName: \"kubernetes.io/projected/4a4b8a27-5d4a-44d2-8553-124cd38ec665-kube-api-access-hhj84\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996323 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-combined-ca-bundle\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996363 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-config-data\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996395 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-scripts\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996447 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-logs\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:56 crc kubenswrapper[3552]: I0320 15:49:56.996613 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-public-tls-certs\") pod \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\" (UID: \"4a4b8a27-5d4a-44d2-8553-124cd38ec665\") " Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.008071 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.008095 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-logs" (OuterVolumeSpecName: "logs") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.018589 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.018921 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-scripts" (OuterVolumeSpecName: "scripts") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.019563 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a4b8a27-5d4a-44d2-8553-124cd38ec665-kube-api-access-hhj84" (OuterVolumeSpecName: "kube-api-access-hhj84") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "kube-api-access-hhj84". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.073635 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-config-data" (OuterVolumeSpecName: "config-data") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.077419 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.100818 3552 reconciler_common.go:300] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-public-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.101116 3552 reconciler_common.go:300] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-httpd-run\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.101148 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.101161 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hhj84\" (UniqueName: \"kubernetes.io/projected/4a4b8a27-5d4a-44d2-8553-124cd38ec665-kube-api-access-hhj84\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.101174 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.101189 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.101200 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a4b8a27-5d4a-44d2-8553-124cd38ec665-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.148938 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.162466 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a4b8a27-5d4a-44d2-8553-124cd38ec665" (UID: "4a4b8a27-5d4a-44d2-8553-124cd38ec665"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.203118 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.203171 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a4b8a27-5d4a-44d2-8553-124cd38ec665-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.500414 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a4b8a27-5d4a-44d2-8553-124cd38ec665","Type":"ContainerDied","Data":"34e646c1b17ccaafb75e346fe4fc6d4dc82d942e73a0716a4e85b6342cc658d7"} Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.500461 3552 scope.go:117] "RemoveContainer" containerID="5b287d58f9489ee09695211f3aaaa8e7ef67ef45fb8f2cc4dbdfeb21aa6999ad" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.500478 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.545060 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.555528 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.579544 3552 scope.go:117] "RemoveContainer" containerID="9b723fd8ce90c7dfda83c7ec544843f705baf74280b9dc2949273e67b902b3de" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.583243 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.583511 3552 topology_manager.go:215] "Topology Admit Handler" podUID="21994c9d-6fc9-428c-8242-f05b41d74c68" podNamespace="openstack" podName="glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: E0320 15:49:57.583731 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-httpd" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.583742 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-httpd" Mar 20 15:49:57 crc kubenswrapper[3552]: E0320 15:49:57.583759 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-log" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.583765 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-log" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.583957 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-httpd" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.583980 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" containerName="glance-log" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.584982 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.589381 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.589798 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.598913 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710070 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710250 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btq22\" (UniqueName: \"kubernetes.io/projected/21994c9d-6fc9-428c-8242-f05b41d74c68-kube-api-access-btq22\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710321 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-config-data\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710476 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-scripts\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710511 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21994c9d-6fc9-428c-8242-f05b41d74c68-logs\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710576 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710686 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.710873 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/21994c9d-6fc9-428c-8242-f05b41d74c68-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.812890 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-btq22\" (UniqueName: \"kubernetes.io/projected/21994c9d-6fc9-428c-8242-f05b41d74c68-kube-api-access-btq22\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.812941 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-config-data\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.812980 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-scripts\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.812998 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21994c9d-6fc9-428c-8242-f05b41d74c68-logs\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.813601 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21994c9d-6fc9-428c-8242-f05b41d74c68-logs\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.813681 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.813762 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.813799 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/21994c9d-6fc9-428c-8242-f05b41d74c68-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.813830 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-public-tls-certs\") 
pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.813913 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.821941 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-config-data\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.822579 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/21994c9d-6fc9-428c-8242-f05b41d74c68-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.822608 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.823933 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.827848 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21994c9d-6fc9-428c-8242-f05b41d74c68-scripts\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.836122 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-btq22\" (UniqueName: \"kubernetes.io/projected/21994c9d-6fc9-428c-8242-f05b41d74c68-kube-api-access-btq22\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.870649 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"21994c9d-6fc9-428c-8242-f05b41d74c68\") " pod="openstack/glance-default-external-api-0" Mar 20 15:49:57 crc kubenswrapper[3552]: I0320 15:49:57.922967 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Mar 20 15:49:58 crc kubenswrapper[3552]: I0320 15:49:58.683438 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Mar 20 15:49:58 crc kubenswrapper[3552]: I0320 15:49:58.894145 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Mar 20 15:49:58 crc kubenswrapper[3552]: W0320 15:49:58.901118 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21994c9d_6fc9_428c_8242_f05b41d74c68.slice/crio-73ddafdc19283499b5cc5d9dbcddc77f782855c296d7334d4de048bafc7f39a1 WatchSource:0}: Error finding container 73ddafdc19283499b5cc5d9dbcddc77f782855c296d7334d4de048bafc7f39a1: Status 404 returned error can't find the container with id 73ddafdc19283499b5cc5d9dbcddc77f782855c296d7334d4de048bafc7f39a1 Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.067694 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.068001 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.196796 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.246596 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.468119 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a4b8a27-5d4a-44d2-8553-124cd38ec665" path="/var/lib/kubelet/pods/4a4b8a27-5d4a-44d2-8553-124cd38ec665/volumes" Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.541749 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"21994c9d-6fc9-428c-8242-f05b41d74c68","Type":"ContainerStarted","Data":"73ddafdc19283499b5cc5d9dbcddc77f782855c296d7334d4de048bafc7f39a1"} Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.542479 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Mar 20 15:49:59 crc kubenswrapper[3552]: I0320 15:49:59.543695 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.315178 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.315535 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.315581 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.315617 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.315644 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:50:01 crc 
kubenswrapper[3552]: I0320 15:50:01.559293 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.559569 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.559348 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"21994c9d-6fc9-428c-8242-f05b41d74c68","Type":"ContainerStarted","Data":"1742e1c70fa5112ab351b8be2634041795908e3f349d9fafbd688d40a92a8835"} Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.559772 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"21994c9d-6fc9-428c-8242-f05b41d74c68","Type":"ContainerStarted","Data":"da9c6f44d6fda892423f486642dd36e657fdc70e687a577509bcb597df01491b"} Mar 20 15:50:01 crc kubenswrapper[3552]: I0320 15:50:01.582164 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.582125919 podStartE2EDuration="4.582125919s" podCreationTimestamp="2026-03-20 15:49:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:50:01.578386485 +0000 UTC m=+1501.272083315" watchObservedRunningTime="2026-03-20 15:50:01.582125919 +0000 UTC m=+1501.275822749" Mar 20 15:50:02 crc kubenswrapper[3552]: I0320 15:50:02.703456 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:02 crc kubenswrapper[3552]: I0320 15:50:02.703973 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-central-agent" containerID="cri-o://b80b8ec5cad1d28a1dff25182854b1685ab8b85bef41a1962ea0e96efa4e9a0b" gracePeriod=30 Mar 20 15:50:02 crc kubenswrapper[3552]: I0320 15:50:02.704053 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:50:02 crc kubenswrapper[3552]: I0320 15:50:02.704075 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="proxy-httpd" containerID="cri-o://578dd7c4e2fc1a8366dd179b8237b860edd7836bf608e1b1260886d3f672521f" gracePeriod=30 Mar 20 15:50:02 crc kubenswrapper[3552]: I0320 15:50:02.704113 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-notification-agent" containerID="cri-o://63ceba0122d25399c06a7b83eae90ad20bc955b9cff10be60bfddff37c95fcb8" gracePeriod=30 Mar 20 15:50:02 crc kubenswrapper[3552]: I0320 15:50:02.704060 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="sg-core" containerID="cri-o://e068bf2ecd023f7795aad15010779be31db2a8425ed634607ca538bcefd071c7" gracePeriod=30 Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.595672 3552 generic.go:334] "Generic (PLEG): container finished" podID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerID="578dd7c4e2fc1a8366dd179b8237b860edd7836bf608e1b1260886d3f672521f" exitCode=0 Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.595994 3552 generic.go:334] "Generic (PLEG): container finished" 
podID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerID="e068bf2ecd023f7795aad15010779be31db2a8425ed634607ca538bcefd071c7" exitCode=2 Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.596005 3552 generic.go:334] "Generic (PLEG): container finished" podID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerID="63ceba0122d25399c06a7b83eae90ad20bc955b9cff10be60bfddff37c95fcb8" exitCode=0 Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.596018 3552 generic.go:334] "Generic (PLEG): container finished" podID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerID="b80b8ec5cad1d28a1dff25182854b1685ab8b85bef41a1962ea0e96efa4e9a0b" exitCode=0 Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.596043 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerDied","Data":"578dd7c4e2fc1a8366dd179b8237b860edd7836bf608e1b1260886d3f672521f"} Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.596063 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerDied","Data":"e068bf2ecd023f7795aad15010779be31db2a8425ed634607ca538bcefd071c7"} Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.596073 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerDied","Data":"63ceba0122d25399c06a7b83eae90ad20bc955b9cff10be60bfddff37c95fcb8"} Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.596082 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerDied","Data":"b80b8ec5cad1d28a1dff25182854b1685ab8b85bef41a1962ea0e96efa4e9a0b"} Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.679703 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747374 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qblcl\" (UniqueName: \"kubernetes.io/projected/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-kube-api-access-qblcl\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747525 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-log-httpd\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747583 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-sg-core-conf-yaml\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747687 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-config-data\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747729 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-combined-ca-bundle\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747765 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-run-httpd\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.747801 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-scripts\") pod \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\" (UID: \"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091\") " Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.748043 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.749021 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.757527 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-kube-api-access-qblcl" (OuterVolumeSpecName: "kube-api-access-qblcl") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "kube-api-access-qblcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.761326 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-scripts" (OuterVolumeSpecName: "scripts") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.780376 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.850806 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.850837 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.850848 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-qblcl\" (UniqueName: \"kubernetes.io/projected/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-kube-api-access-qblcl\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.850859 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.850869 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.871299 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.889652 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-config-data" (OuterVolumeSpecName: "config-data") pod "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" (UID: "ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.922343 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.922465 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.925517 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.952893 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:03 crc kubenswrapper[3552]: I0320 15:50:03.952939 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.607576 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.607760 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091","Type":"ContainerDied","Data":"a4a25cfa7a43f59640bb4e6a2cf252ef8cb228cacb4026d2ba7230212c049d4b"} Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.608956 3552 scope.go:117] "RemoveContainer" containerID="578dd7c4e2fc1a8366dd179b8237b860edd7836bf608e1b1260886d3f672521f" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.675560 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.677867 3552 scope.go:117] "RemoveContainer" containerID="e068bf2ecd023f7795aad15010779be31db2a8425ed634607ca538bcefd071c7" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.686464 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.720926 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721146 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: E0320 15:50:04.721493 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="proxy-httpd" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721515 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="proxy-httpd" Mar 20 15:50:04 crc kubenswrapper[3552]: E0320 15:50:04.721529 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-central-agent" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721539 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-central-agent" Mar 20 15:50:04 crc kubenswrapper[3552]: E0320 15:50:04.721581 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" 
containerName="ceilometer-notification-agent" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721592 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-notification-agent" Mar 20 15:50:04 crc kubenswrapper[3552]: E0320 15:50:04.721612 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="sg-core" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721621 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="sg-core" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721870 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-notification-agent" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721898 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="sg-core" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721919 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="ceilometer-central-agent" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.721929 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" containerName="proxy-httpd" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.724023 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.726626 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.727505 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.733199 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.738455 3552 scope.go:117] "RemoveContainer" containerID="63ceba0122d25399c06a7b83eae90ad20bc955b9cff10be60bfddff37c95fcb8" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.818349 3552 scope.go:117] "RemoveContainer" containerID="b80b8ec5cad1d28a1dff25182854b1685ab8b85bef41a1962ea0e96efa4e9a0b" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.878944 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn7k7\" (UniqueName: \"kubernetes.io/projected/6d54798f-cf78-4d76-93d7-c750389a56ef-kube-api-access-kn7k7\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.879023 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-config-data\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.879072 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-log-httpd\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " 
pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.879091 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-scripts\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.879115 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-run-httpd\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.879189 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.879233 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981627 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981697 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981795 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kn7k7\" (UniqueName: \"kubernetes.io/projected/6d54798f-cf78-4d76-93d7-c750389a56ef-kube-api-access-kn7k7\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981855 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-config-data\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981901 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-log-httpd\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981918 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-scripts\") pod 
\"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.981939 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-run-httpd\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.982558 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-run-httpd\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.983007 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-log-httpd\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.987553 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-scripts\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.991119 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.996164 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:04 crc kubenswrapper[3552]: I0320 15:50:04.998742 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-config-data\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:05 crc kubenswrapper[3552]: I0320 15:50:05.034207 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn7k7\" (UniqueName: \"kubernetes.io/projected/6d54798f-cf78-4d76-93d7-c750389a56ef-kube-api-access-kn7k7\") pod \"ceilometer-0\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " pod="openstack/ceilometer-0" Mar 20 15:50:05 crc kubenswrapper[3552]: I0320 15:50:05.077113 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:05 crc kubenswrapper[3552]: I0320 15:50:05.446173 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091" path="/var/lib/kubelet/pods/ba5c69b2-cc4b-4de0-b4bd-e129d2d7b091/volumes" Mar 20 15:50:05 crc kubenswrapper[3552]: I0320 15:50:05.631116 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:05 crc kubenswrapper[3552]: W0320 15:50:05.631209 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d54798f_cf78_4d76_93d7_c750389a56ef.slice/crio-aef665afac6bd5e52054a7bae14bb08923266c1bc305f26685e4c6c31601d8d2 WatchSource:0}: Error finding container aef665afac6bd5e52054a7bae14bb08923266c1bc305f26685e4c6c31601d8d2: Status 404 returned error can't find the container with id aef665afac6bd5e52054a7bae14bb08923266c1bc305f26685e4c6c31601d8d2 Mar 20 15:50:05 crc kubenswrapper[3552]: I0320 15:50:05.639502 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 15:50:06 crc kubenswrapper[3552]: I0320 15:50:06.213866 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2bkk6" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" probeResult="failure" output=< Mar 20 15:50:06 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:50:06 crc kubenswrapper[3552]: > Mar 20 15:50:06 crc kubenswrapper[3552]: I0320 15:50:06.505551 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:06 crc kubenswrapper[3552]: I0320 15:50:06.625204 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerStarted","Data":"2b8a5936718b6d1697b1ebff9cc7c18b94d099d30df4be65ef9bfd16940e8bca"} Mar 20 15:50:06 crc kubenswrapper[3552]: I0320 15:50:06.625240 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerStarted","Data":"aef665afac6bd5e52054a7bae14bb08923266c1bc305f26685e4c6c31601d8d2"} Mar 20 15:50:07 crc kubenswrapper[3552]: I0320 15:50:07.649684 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerStarted","Data":"5f8c3cf550d8ef34d533693e0dcd2e0040bcd974fa417f0715cb8b48f9906109"} Mar 20 15:50:07 crc kubenswrapper[3552]: I0320 15:50:07.923312 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Mar 20 15:50:07 crc kubenswrapper[3552]: I0320 15:50:07.923723 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Mar 20 15:50:08 crc kubenswrapper[3552]: I0320 15:50:08.058457 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Mar 20 15:50:08 crc kubenswrapper[3552]: I0320 15:50:08.101147 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Mar 20 15:50:08 crc kubenswrapper[3552]: I0320 15:50:08.663093 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerStarted","Data":"50dae54f02da9e300e6d56f253f319e2a6dd546bc3994a5b206177495ad97a7e"} Mar 20 15:50:08 crc kubenswrapper[3552]: I0320 15:50:08.663779 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Mar 20 15:50:08 crc kubenswrapper[3552]: I0320 15:50:08.664334 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.676465 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-central-agent" containerID="cri-o://2b8a5936718b6d1697b1ebff9cc7c18b94d099d30df4be65ef9bfd16940e8bca" gracePeriod=30 Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.676569 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="proxy-httpd" containerID="cri-o://36a8e5daffc0ba2fa859692b09141c1230ad5a69794a2cf64c593a5c1b19ed49" gracePeriod=30 Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.676582 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="sg-core" containerID="cri-o://50dae54f02da9e300e6d56f253f319e2a6dd546bc3994a5b206177495ad97a7e" gracePeriod=30 Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.676610 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-notification-agent" containerID="cri-o://5f8c3cf550d8ef34d533693e0dcd2e0040bcd974fa417f0715cb8b48f9906109" gracePeriod=30 Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.676428 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerStarted","Data":"36a8e5daffc0ba2fa859692b09141c1230ad5a69794a2cf64c593a5c1b19ed49"} Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.676963 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:50:09 crc kubenswrapper[3552]: I0320 15:50:09.718083 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7089907909999997 podStartE2EDuration="5.71803706s" podCreationTimestamp="2026-03-20 15:50:04 +0000 UTC" firstStartedPulling="2026-03-20 15:50:05.63610306 +0000 UTC m=+1505.329799890" lastFinishedPulling="2026-03-20 15:50:08.645149319 +0000 UTC m=+1508.338846159" observedRunningTime="2026-03-20 15:50:09.707989995 +0000 UTC m=+1509.401686835" watchObservedRunningTime="2026-03-20 15:50:09.71803706 +0000 UTC m=+1509.411733880" Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.685478 3552 generic.go:334] "Generic (PLEG): container finished" podID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerID="36a8e5daffc0ba2fa859692b09141c1230ad5a69794a2cf64c593a5c1b19ed49" exitCode=0 Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.685899 3552 generic.go:334] "Generic (PLEG): container finished" podID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerID="50dae54f02da9e300e6d56f253f319e2a6dd546bc3994a5b206177495ad97a7e" exitCode=2 Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 
15:50:10.685916 3552 generic.go:334] "Generic (PLEG): container finished" podID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerID="5f8c3cf550d8ef34d533693e0dcd2e0040bcd974fa417f0715cb8b48f9906109" exitCode=0 Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.686023 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.686034 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.687711 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerDied","Data":"36a8e5daffc0ba2fa859692b09141c1230ad5a69794a2cf64c593a5c1b19ed49"} Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.687767 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerDied","Data":"50dae54f02da9e300e6d56f253f319e2a6dd546bc3994a5b206177495ad97a7e"} Mar 20 15:50:10 crc kubenswrapper[3552]: I0320 15:50:10.687778 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerDied","Data":"5f8c3cf550d8ef34d533693e0dcd2e0040bcd974fa417f0715cb8b48f9906109"} Mar 20 15:50:11 crc kubenswrapper[3552]: I0320 15:50:11.481915 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Mar 20 15:50:11 crc kubenswrapper[3552]: I0320 15:50:11.692204 3552 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Mar 20 15:50:11 crc kubenswrapper[3552]: I0320 15:50:11.993469 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.700877 3552 generic.go:334] "Generic (PLEG): container finished" podID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerID="2b8a5936718b6d1697b1ebff9cc7c18b94d099d30df4be65ef9bfd16940e8bca" exitCode=0 Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.702522 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerDied","Data":"2b8a5936718b6d1697b1ebff9cc7c18b94d099d30df4be65ef9bfd16940e8bca"} Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.702567 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6d54798f-cf78-4d76-93d7-c750389a56ef","Type":"ContainerDied","Data":"aef665afac6bd5e52054a7bae14bb08923266c1bc305f26685e4c6c31601d8d2"} Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.702582 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aef665afac6bd5e52054a7bae14bb08923266c1bc305f26685e4c6c31601d8d2"
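
The teardown that follows ("operationExecutor.UnmountVolume started", "Volume detached") is the kubelet volume manager reconciling its desired state of world (the pod is deleted, so no volumes are wanted) against the actual state (volumes still mounted). A toy version of that comparison, with plain string sets standing in for the reconciler's real bookkeeping:

package main

import "fmt"

// volumesToUnmount returns every volume that is mounted (actual) but no
// longer wanted (desired). The kubelet's reconciler performs this kind of
// diff each pass and kicks off an UnmountVolume operation per result.
func volumesToUnmount(desired, actual map[string]bool) []string {
	var out []string
	for vol := range actual { // map order is unspecified, like log ordering
		if !desired[vol] {
			out = append(out, vol)
		}
	}
	return out
}

func main() {
	actual := map[string]bool{"log-httpd": true, "run-httpd": true, "scripts": true, "config-data": true}
	desired := map[string]bool{} // pod deleted: nothing should stay mounted
	fmt.Println(volumesToUnmount(desired, actual))
}

Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.711251 3552 util.go:48] "No ready sandbox for pod can be found.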
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766002 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-log-httpd\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766061 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-sg-core-conf-yaml\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766097 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-run-httpd\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766331 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-config-data\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766375 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-combined-ca-bundle\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766418 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn7k7\" (UniqueName: \"kubernetes.io/projected/6d54798f-cf78-4d76-93d7-c750389a56ef-kube-api-access-kn7k7\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766453 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-scripts\") pod \"6d54798f-cf78-4d76-93d7-c750389a56ef\" (UID: \"6d54798f-cf78-4d76-93d7-c750389a56ef\") " Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.766733 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.767299 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.768464 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.773122 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-scripts" (OuterVolumeSpecName: "scripts") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.792575 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d54798f-cf78-4d76-93d7-c750389a56ef-kube-api-access-kn7k7" (OuterVolumeSpecName: "kube-api-access-kn7k7") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). InnerVolumeSpecName "kube-api-access-kn7k7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.858856 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.868925 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kn7k7\" (UniqueName: \"kubernetes.io/projected/6d54798f-cf78-4d76-93d7-c750389a56ef-kube-api-access-kn7k7\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.868960 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.868972 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.868982 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6d54798f-cf78-4d76-93d7-c750389a56ef-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.883568 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.921842 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-config-data" (OuterVolumeSpecName: "config-data") pod "6d54798f-cf78-4d76-93d7-c750389a56ef" (UID: "6d54798f-cf78-4d76-93d7-c750389a56ef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.970192 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:12 crc kubenswrapper[3552]: I0320 15:50:12.970232 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d54798f-cf78-4d76-93d7-c750389a56ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.707213 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.748531 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.755012 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.764288 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.764527 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c51a3720-c11a-48ac-ae77-777471c21a51" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: E0320 15:50:13.764877 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="proxy-httpd" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.764893 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="proxy-httpd" Mar 20 15:50:13 crc kubenswrapper[3552]: E0320 15:50:13.764918 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="sg-core" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.764930 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="sg-core" Mar 20 15:50:13 crc kubenswrapper[3552]: E0320 15:50:13.764973 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-central-agent" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.764983 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-central-agent" Mar 20 15:50:13 crc kubenswrapper[3552]: E0320 15:50:13.765002 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-notification-agent" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.765013 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-notification-agent" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.765261 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-central-agent" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.765281 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="sg-core" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.765297 3552 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="ceilometer-notification-agent" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.765316 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" containerName="proxy-httpd" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.767749 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.770325 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.770622 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.777154 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:13 crc kubenswrapper[3552]: E0320 15:50:13.785057 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-k76jq log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[combined-ca-bundle config-data kube-api-access-k76jq log-httpd run-httpd scripts sg-core-conf-yaml]: context canceled" pod="openstack/ceilometer-0" podUID="c51a3720-c11a-48ac-ae77-777471c21a51" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.818210 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.887816 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-log-httpd\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.887881 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k76jq\" (UniqueName: \"kubernetes.io/projected/c51a3720-c11a-48ac-ae77-777471c21a51-kube-api-access-k76jq\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.887905 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-run-httpd\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.887941 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-scripts\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.888170 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.888229 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.888357 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-config-data\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.904122 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7558f7559f-rntkm" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.978148 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-bb7976659-5s6lm"] Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.978461 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/neutron-bb7976659-5s6lm" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-api" containerID="cri-o://f914ef14fce3809ef7373ba9424927ac2926dc300a9140af5c324c48646876a2" gracePeriod=30 Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.978694 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/neutron-bb7976659-5s6lm" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" containerID="cri-o://2d0120992c2347bcb66495ba1126b75dcaeaf0922d715ab79159442ca390bcab" gracePeriod=30 Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.990753 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-k76jq\" (UniqueName: \"kubernetes.io/projected/c51a3720-c11a-48ac-ae77-777471c21a51-kube-api-access-k76jq\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.990828 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-run-httpd\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.990900 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-scripts\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.990954 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.990984 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.991017 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-config-data\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.992230 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-run-httpd\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.992359 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-log-httpd\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.992974 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-log-httpd\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:13 crc kubenswrapper[3552]: I0320 15:50:13.999719 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-scripts\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.000750 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-config-data\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.010389 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.021437 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-k76jq\" (UniqueName: \"kubernetes.io/projected/c51a3720-c11a-48ac-ae77-777471c21a51-kube-api-access-k76jq\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.021775 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.720510 3552 generic.go:334] "Generic (PLEG): container finished" podID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerID="2d0120992c2347bcb66495ba1126b75dcaeaf0922d715ab79159442ca390bcab" exitCode=0 Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.720583 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bb7976659-5s6lm" 
event={"ID":"4b220040-ff08-4478-8b58-bc3ccc670c86","Type":"ContainerDied","Data":"2d0120992c2347bcb66495ba1126b75dcaeaf0922d715ab79159442ca390bcab"} Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.720865 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.731017 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815466 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-log-httpd\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815602 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-sg-core-conf-yaml\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815637 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-scripts\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815697 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-config-data\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815747 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-run-httpd\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815860 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815877 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k76jq\" (UniqueName: \"kubernetes.io/projected/c51a3720-c11a-48ac-ae77-777471c21a51-kube-api-access-k76jq\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.815976 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-combined-ca-bundle\") pod \"c51a3720-c11a-48ac-ae77-777471c21a51\" (UID: \"c51a3720-c11a-48ac-ae77-777471c21a51\") " Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.816032 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.816935 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.816964 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c51a3720-c11a-48ac-ae77-777471c21a51-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.824025 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.824460 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.825137 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c51a3720-c11a-48ac-ae77-777471c21a51-kube-api-access-k76jq" (OuterVolumeSpecName: "kube-api-access-k76jq") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "kube-api-access-k76jq". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.825496 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-config-data" (OuterVolumeSpecName: "config-data") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.832283 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-scripts" (OuterVolumeSpecName: "scripts") pod "c51a3720-c11a-48ac-ae77-777471c21a51" (UID: "c51a3720-c11a-48ac-ae77-777471c21a51"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.918238 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.918291 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-k76jq\" (UniqueName: \"kubernetes.io/projected/c51a3720-c11a-48ac-ae77-777471c21a51-kube-api-access-k76jq\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.918308 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.918322 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:14 crc kubenswrapper[3552]: I0320 15:50:14.918336 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c51a3720-c11a-48ac-ae77-777471c21a51-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.466637 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d54798f-cf78-4d76-93d7-c750389a56ef" path="/var/lib/kubelet/pods/6d54798f-cf78-4d76-93d7-c750389a56ef/volumes" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.728960 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.778728 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.799572 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.841589 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.841799 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.844040 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.847035 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.848840 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.853008 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.934804 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-scripts\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.934858 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.934892 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-log-httpd\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.935043 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.935068 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-run-httpd\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.935119 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-config-data\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:15 crc kubenswrapper[3552]: I0320 15:50:15.935139 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxxsm\" (UniqueName: \"kubernetes.io/projected/8c38cad6-beb6-473e-aafc-d4912c1a4072-kube-api-access-xxxsm\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.034863 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2bkk6" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" probeResult="failure" output=< Mar 20 15:50:16 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:50:16 crc kubenswrapper[3552]: 
> Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.036933 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037001 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-run-httpd\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037082 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-config-data\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037112 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-xxxsm\" (UniqueName: \"kubernetes.io/projected/8c38cad6-beb6-473e-aafc-d4912c1a4072-kube-api-access-xxxsm\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-scripts\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037196 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037237 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-log-httpd\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.037972 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-log-httpd\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.042025 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-run-httpd\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.044138 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 
15:50:16.045454 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-config-data\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.046307 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-scripts\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.048890 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.058720 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxxsm\" (UniqueName: \"kubernetes.io/projected/8c38cad6-beb6-473e-aafc-d4912c1a4072-kube-api-access-xxxsm\") pod \"ceilometer-0\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.159137 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.649382 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.737138 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerStarted","Data":"53814e25e4466959e71527d585284a984c78a04390bdf00bc6ccda9f16db5e7e"} Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.740314 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerID="1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87" exitCode=0 Mar 20 15:50:16 crc kubenswrapper[3552]: I0320 15:50:16.740497 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerDied","Data":"1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87"} Mar 20 15:50:17 crc kubenswrapper[3552]: I0320 15:50:17.442343 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c51a3720-c11a-48ac-ae77-777471c21a51" path="/var/lib/kubelet/pods/c51a3720-c11a-48ac-ae77-777471c21a51/volumes" Mar 20 15:50:17 crc kubenswrapper[3552]: I0320 15:50:17.751256 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerStarted","Data":"80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398"} Mar 20 15:50:18 crc kubenswrapper[3552]: I0320 15:50:18.760071 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerStarted","Data":"9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4"} Mar 20 15:50:18 crc kubenswrapper[3552]: I0320 15:50:18.763799 3552 generic.go:334] "Generic (PLEG): container finished" 
podID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerID="f914ef14fce3809ef7373ba9424927ac2926dc300a9140af5c324c48646876a2" exitCode=0 Mar 20 15:50:18 crc kubenswrapper[3552]: I0320 15:50:18.763854 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bb7976659-5s6lm" event={"ID":"4b220040-ff08-4478-8b58-bc3ccc670c86","Type":"ContainerDied","Data":"f914ef14fce3809ef7373ba9424927ac2926dc300a9140af5c324c48646876a2"} Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.149110 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.186831 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5dp9z" podStartSLOduration=5.330816198 podStartE2EDuration="39.18678354s" podCreationTimestamp="2026-03-20 15:49:40 +0000 UTC" firstStartedPulling="2026-03-20 15:49:43.24235073 +0000 UTC m=+1482.936047560" lastFinishedPulling="2026-03-20 15:50:17.098318072 +0000 UTC m=+1516.792014902" observedRunningTime="2026-03-20 15:50:18.808783711 +0000 UTC m=+1518.502480551" watchObservedRunningTime="2026-03-20 15:50:19.18678354 +0000 UTC m=+1518.880480370" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221068 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-public-tls-certs\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221214 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-combined-ca-bundle\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221306 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-httpd-config\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221328 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-ovndb-tls-certs\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221358 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvkt7\" (UniqueName: \"kubernetes.io/projected/4b220040-ff08-4478-8b58-bc3ccc670c86-kube-api-access-xvkt7\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221383 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-internal-tls-certs\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.221437 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-config\") pod \"4b220040-ff08-4478-8b58-bc3ccc670c86\" (UID: \"4b220040-ff08-4478-8b58-bc3ccc670c86\") " Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.249103 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b220040-ff08-4478-8b58-bc3ccc670c86-kube-api-access-xvkt7" (OuterVolumeSpecName: "kube-api-access-xvkt7") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "kube-api-access-xvkt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.261370 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.330121 3552 reconciler_common.go:300] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-httpd-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.334509 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-xvkt7\" (UniqueName: \"kubernetes.io/projected/4b220040-ff08-4478-8b58-bc3ccc670c86-kube-api-access-xvkt7\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.336921 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.344205 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.356274 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-config" (OuterVolumeSpecName: "config") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.368876 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.384488 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "4b220040-ff08-4478-8b58-bc3ccc670c86" (UID: "4b220040-ff08-4478-8b58-bc3ccc670c86"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.437373 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.437717 3552 reconciler_common.go:300] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.437729 3552 reconciler_common.go:300] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.437739 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.437748 3552 reconciler_common.go:300] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b220040-ff08-4478-8b58-bc3ccc670c86-public-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.493720 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-zvnmw"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.493924 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4103ddf5-a1fa-417e-8948-6f54835b9ff2" podNamespace="openstack" podName="nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: E0320 15:50:19.502079 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.502117 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" Mar 20 15:50:19 crc kubenswrapper[3552]: E0320 15:50:19.502168 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-api" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.502177 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-api" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.502455 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-httpd" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.502505 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" containerName="neutron-api" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.503505 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.553212 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-zvnmw"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.609926 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-f5d68"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.610117 3552 topology_manager.go:215] "Topology Admit Handler" podUID="084c549c-51f1-4d1d-92bf-1e18655e07f8" podNamespace="openstack" podName="nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.621235 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.647520 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4103ddf5-a1fa-417e-8948-6f54835b9ff2-operator-scripts\") pod \"nova-api-db-create-zvnmw\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.647597 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5488\" (UniqueName: \"kubernetes.io/projected/4103ddf5-a1fa-417e-8948-6f54835b9ff2-kube-api-access-c5488\") pod \"nova-api-db-create-zvnmw\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.701220 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-f5d68"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.705789 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-lbvs5"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.705963 3552 topology_manager.go:215] "Topology Admit Handler" podUID="8a1a135a-ae66-4349-80ed-bef49b8632e4" podNamespace="openstack" podName="nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.707016 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.724365 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-lbvs5"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.750225 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-api-adfe-account-create-update-7t7fl"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.750447 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b6543e96-134c-4d66-8410-a2f517b9d896" podNamespace="openstack" podName="nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.751815 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.753760 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8jlc\" (UniqueName: \"kubernetes.io/projected/084c549c-51f1-4d1d-92bf-1e18655e07f8-kube-api-access-j8jlc\") pod \"nova-cell0-db-create-f5d68\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.753839 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4103ddf5-a1fa-417e-8948-6f54835b9ff2-operator-scripts\") pod \"nova-api-db-create-zvnmw\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.753875 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/084c549c-51f1-4d1d-92bf-1e18655e07f8-operator-scripts\") pod \"nova-cell0-db-create-f5d68\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.753905 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-c5488\" (UniqueName: \"kubernetes.io/projected/4103ddf5-a1fa-417e-8948-6f54835b9ff2-kube-api-access-c5488\") pod \"nova-api-db-create-zvnmw\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.757019 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-adfe-account-create-update-7t7fl"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.758802 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.759180 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4103ddf5-a1fa-417e-8948-6f54835b9ff2-operator-scripts\") pod \"nova-api-db-create-zvnmw\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.788299 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5488\" (UniqueName: \"kubernetes.io/projected/4103ddf5-a1fa-417e-8948-6f54835b9ff2-kube-api-access-c5488\") pod \"nova-api-db-create-zvnmw\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.850494 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-aae5-account-create-update-xncvz"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.850895 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" podNamespace="openstack" podName="nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.852006 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.853180 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bb7976659-5s6lm" event={"ID":"4b220040-ff08-4478-8b58-bc3ccc670c86","Type":"ContainerDied","Data":"811eff1f82a058833fc017ec49f079e854f8464ab3a9fce611fb2587e0824642"} Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.853221 3552 scope.go:117] "RemoveContainer" containerID="2d0120992c2347bcb66495ba1126b75dcaeaf0922d715ab79159442ca390bcab" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.853329 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-bb7976659-5s6lm" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.854710 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6543e96-134c-4d66-8410-a2f517b9d896-operator-scripts\") pod \"nova-api-adfe-account-create-update-7t7fl\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.854792 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a1a135a-ae66-4349-80ed-bef49b8632e4-operator-scripts\") pod \"nova-cell1-db-create-lbvs5\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.854836 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j8jlc\" (UniqueName: \"kubernetes.io/projected/084c549c-51f1-4d1d-92bf-1e18655e07f8-kube-api-access-j8jlc\") pod \"nova-cell0-db-create-f5d68\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.854864 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7vnf\" (UniqueName: \"kubernetes.io/projected/b6543e96-134c-4d66-8410-a2f517b9d896-kube-api-access-k7vnf\") pod \"nova-api-adfe-account-create-update-7t7fl\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.854894 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmlv9\" (UniqueName: \"kubernetes.io/projected/8a1a135a-ae66-4349-80ed-bef49b8632e4-kube-api-access-wmlv9\") pod \"nova-cell1-db-create-lbvs5\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.854946 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/084c549c-51f1-4d1d-92bf-1e18655e07f8-operator-scripts\") pod \"nova-cell0-db-create-f5d68\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.855587 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/084c549c-51f1-4d1d-92bf-1e18655e07f8-operator-scripts\") pod \"nova-cell0-db-create-f5d68\" (UID: 
\"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.860921 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.864476 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-aae5-account-create-update-xncvz"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.877129 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-61e5-account-create-update-k5rvg"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.877314 3552 topology_manager.go:215] "Topology Admit Handler" podUID="82b3f1de-b3b7-4401-bb4a-5f3d3a009041" podNamespace="openstack" podName="nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.878692 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerStarted","Data":"e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68"} Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.878769 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.884878 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.897586 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-61e5-account-create-update-k5rvg"] Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.899653 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8jlc\" (UniqueName: \"kubernetes.io/projected/084c549c-51f1-4d1d-92bf-1e18655e07f8-kube-api-access-j8jlc\") pod \"nova-cell0-db-create-f5d68\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956058 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a1a135a-ae66-4349-80ed-bef49b8632e4-operator-scripts\") pod \"nova-cell1-db-create-lbvs5\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956132 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-k7vnf\" (UniqueName: \"kubernetes.io/projected/b6543e96-134c-4d66-8410-a2f517b9d896-kube-api-access-k7vnf\") pod \"nova-api-adfe-account-create-update-7t7fl\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956170 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wmlv9\" (UniqueName: \"kubernetes.io/projected/8a1a135a-ae66-4349-80ed-bef49b8632e4-kube-api-access-wmlv9\") pod \"nova-cell1-db-create-lbvs5\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956208 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-operator-scripts\") pod \"nova-cell1-61e5-account-create-update-k5rvg\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956242 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hznf4\" (UniqueName: \"kubernetes.io/projected/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-kube-api-access-hznf4\") pod \"nova-cell0-aae5-account-create-update-xncvz\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956271 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph7fw\" (UniqueName: \"kubernetes.io/projected/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-kube-api-access-ph7fw\") pod \"nova-cell1-61e5-account-create-update-k5rvg\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956324 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6543e96-134c-4d66-8410-a2f517b9d896-operator-scripts\") pod \"nova-api-adfe-account-create-update-7t7fl\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.956357 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-operator-scripts\") pod \"nova-cell0-aae5-account-create-update-xncvz\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.957141 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a1a135a-ae66-4349-80ed-bef49b8632e4-operator-scripts\") pod \"nova-cell1-db-create-lbvs5\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.957470 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6543e96-134c-4d66-8410-a2f517b9d896-operator-scripts\") pod \"nova-api-adfe-account-create-update-7t7fl\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.971709 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7vnf\" (UniqueName: \"kubernetes.io/projected/b6543e96-134c-4d66-8410-a2f517b9d896-kube-api-access-k7vnf\") pod \"nova-api-adfe-account-create-update-7t7fl\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.976601 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmlv9\" (UniqueName: \"kubernetes.io/projected/8a1a135a-ae66-4349-80ed-bef49b8632e4-kube-api-access-wmlv9\") pod \"nova-cell1-db-create-lbvs5\" (UID: 
\"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:19 crc kubenswrapper[3552]: I0320 15:50:19.978080 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.004186 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-bb7976659-5s6lm"] Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.025515 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.033497 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-bb7976659-5s6lm"] Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.066152 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-operator-scripts\") pod \"nova-cell1-61e5-account-create-update-k5rvg\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.066215 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hznf4\" (UniqueName: \"kubernetes.io/projected/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-kube-api-access-hznf4\") pod \"nova-cell0-aae5-account-create-update-xncvz\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.066279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ph7fw\" (UniqueName: \"kubernetes.io/projected/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-kube-api-access-ph7fw\") pod \"nova-cell1-61e5-account-create-update-k5rvg\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.066339 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-operator-scripts\") pod \"nova-cell0-aae5-account-create-update-xncvz\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.068051 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.068928 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-operator-scripts\") pod \"nova-cell1-61e5-account-create-update-k5rvg\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.069860 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-operator-scripts\") pod \"nova-cell0-aae5-account-create-update-xncvz\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.069949 3552 scope.go:117] "RemoveContainer" containerID="f914ef14fce3809ef7373ba9424927ac2926dc300a9140af5c324c48646876a2" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.090265 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hznf4\" (UniqueName: \"kubernetes.io/projected/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-kube-api-access-hznf4\") pod \"nova-cell0-aae5-account-create-update-xncvz\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.104671 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.111947 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph7fw\" (UniqueName: \"kubernetes.io/projected/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-kube-api-access-ph7fw\") pod \"nova-cell1-61e5-account-create-update-k5rvg\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.218527 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.241022 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:20 crc kubenswrapper[3552]: I0320 15:50:20.903621 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerStarted","Data":"3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.001920 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-zvnmw"] Mar 20 15:50:21 crc kubenswrapper[3552]: W0320 15:50:21.017457 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod084c549c_51f1_4d1d_92bf_1e18655e07f8.slice/crio-65439560e122a1e64185d3aa0cb0ad51407052bfcee36ffa89c3f0f6d159f751 WatchSource:0}: Error finding container 65439560e122a1e64185d3aa0cb0ad51407052bfcee36ffa89c3f0f6d159f751: Status 404 returned error can't find the container with id 65439560e122a1e64185d3aa0cb0ad51407052bfcee36ffa89c3f0f6d159f751 Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.025913 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-f5d68"] Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.191342 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-lbvs5"] Mar 20 15:50:21 crc kubenswrapper[3552]: W0320 15:50:21.214233 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a1a135a_ae66_4349_80ed_bef49b8632e4.slice/crio-96238862b36dde7133eacb1335fa5cc66f7f1fc7520d31cec73dcd232a26c56e WatchSource:0}: Error finding container 96238862b36dde7133eacb1335fa5cc66f7f1fc7520d31cec73dcd232a26c56e: Status 404 returned error can't find the container with id 96238862b36dde7133eacb1335fa5cc66f7f1fc7520d31cec73dcd232a26c56e Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.215499 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.215525 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.301765 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-aae5-account-create-update-xncvz"] Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.331749 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-adfe-account-create-update-7t7fl"] Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.465321 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b220040-ff08-4478-8b58-bc3ccc670c86" path="/var/lib/kubelet/pods/4b220040-ff08-4478-8b58-bc3ccc670c86/volumes" Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.469489 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-61e5-account-create-update-k5rvg"] Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.919733 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" event={"ID":"82b3f1de-b3b7-4401-bb4a-5f3d3a009041","Type":"ContainerStarted","Data":"029b285616e82d0239fb0a9bdaad91741aae2946ac3c676e4a7b1813f5c74c11"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.923825 3552 kubelet.go:2461] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-api-db-create-zvnmw" event={"ID":"4103ddf5-a1fa-417e-8948-6f54835b9ff2","Type":"ContainerStarted","Data":"7fafa5b51087234d8d0a61eb64e29053ab8afcc0fb946e770ce31f800b8323ca"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.923857 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-zvnmw" event={"ID":"4103ddf5-a1fa-417e-8948-6f54835b9ff2","Type":"ContainerStarted","Data":"81322acc0996bef8d6fb50d8d7f2b9236d33abf1765bfc5b9421f3da18d0bb3e"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.928528 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-adfe-account-create-update-7t7fl" event={"ID":"b6543e96-134c-4d66-8410-a2f517b9d896","Type":"ContainerStarted","Data":"a3b22a27128877c452fcc160c26597b78a1491482799911d65a7cadd27f1e20d"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.933235 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-lbvs5" event={"ID":"8a1a135a-ae66-4349-80ed-bef49b8632e4","Type":"ContainerStarted","Data":"96238862b36dde7133eacb1335fa5cc66f7f1fc7520d31cec73dcd232a26c56e"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.951615 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-api-db-create-zvnmw" podStartSLOduration=2.951564769 podStartE2EDuration="2.951564769s" podCreationTimestamp="2026-03-20 15:50:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:50:21.946613103 +0000 UTC m=+1521.640309953" watchObservedRunningTime="2026-03-20 15:50:21.951564769 +0000 UTC m=+1521.645261599" Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.955156 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerStarted","Data":"ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.956314 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.960095 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-f5d68" event={"ID":"084c549c-51f1-4d1d-92bf-1e18655e07f8","Type":"ContainerStarted","Data":"65439560e122a1e64185d3aa0cb0ad51407052bfcee36ffa89c3f0f6d159f751"} Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.975168 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.105945186 podStartE2EDuration="6.975129298s" podCreationTimestamp="2026-03-20 15:50:15 +0000 UTC" firstStartedPulling="2026-03-20 15:50:16.660021401 +0000 UTC m=+1516.353718231" lastFinishedPulling="2026-03-20 15:50:20.529205513 +0000 UTC m=+1520.222902343" observedRunningTime="2026-03-20 15:50:21.973277001 +0000 UTC m=+1521.666973841" watchObservedRunningTime="2026-03-20 15:50:21.975129298 +0000 UTC m=+1521.668826128" Mar 20 15:50:21 crc kubenswrapper[3552]: I0320 15:50:21.983558 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" event={"ID":"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211","Type":"ContainerStarted","Data":"ef40d7fb325b41fe0357d5300bb8bc3e285decc51a74e3440d80da76ea2c1a1f"} Mar 20 15:50:22 crc kubenswrapper[3552]: I0320 15:50:22.480642 3552 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/community-operators-5dp9z" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="registry-server" probeResult="failure" output=< Mar 20 15:50:22 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:50:22 crc kubenswrapper[3552]: > Mar 20 15:50:22 crc kubenswrapper[3552]: I0320 15:50:22.997365 3552 generic.go:334] "Generic (PLEG): container finished" podID="8a1a135a-ae66-4349-80ed-bef49b8632e4" containerID="80e049e27f64b60997e81ae27313558fd5a297164ae94fde489aed360e16ddca" exitCode=0 Mar 20 15:50:22 crc kubenswrapper[3552]: I0320 15:50:22.997460 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-lbvs5" event={"ID":"8a1a135a-ae66-4349-80ed-bef49b8632e4","Type":"ContainerDied","Data":"80e049e27f64b60997e81ae27313558fd5a297164ae94fde489aed360e16ddca"} Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.005332 3552 generic.go:334] "Generic (PLEG): container finished" podID="084c549c-51f1-4d1d-92bf-1e18655e07f8" containerID="5fd3c9312436411d0fb3bc40ac6b3a98ea6249c887ec93793c8f307614f03379" exitCode=0 Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.005443 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-f5d68" event={"ID":"084c549c-51f1-4d1d-92bf-1e18655e07f8","Type":"ContainerDied","Data":"5fd3c9312436411d0fb3bc40ac6b3a98ea6249c887ec93793c8f307614f03379"} Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.009130 3552 generic.go:334] "Generic (PLEG): container finished" podID="b6543e96-134c-4d66-8410-a2f517b9d896" containerID="2e70252a8d7ea4b7a2db3ebb595c9247b1c9f888c76a425f7db4ddbc6a98f888" exitCode=0 Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.009246 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-adfe-account-create-update-7t7fl" event={"ID":"b6543e96-134c-4d66-8410-a2f517b9d896","Type":"ContainerDied","Data":"2e70252a8d7ea4b7a2db3ebb595c9247b1c9f888c76a425f7db4ddbc6a98f888"} Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.011243 3552 generic.go:334] "Generic (PLEG): container finished" podID="4103ddf5-a1fa-417e-8948-6f54835b9ff2" containerID="7fafa5b51087234d8d0a61eb64e29053ab8afcc0fb946e770ce31f800b8323ca" exitCode=0 Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.011319 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-zvnmw" event={"ID":"4103ddf5-a1fa-417e-8948-6f54835b9ff2","Type":"ContainerDied","Data":"7fafa5b51087234d8d0a61eb64e29053ab8afcc0fb946e770ce31f800b8323ca"} Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.021983 3552 generic.go:334] "Generic (PLEG): container finished" podID="6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" containerID="4a530425b07693de2e4e0194ace026aefd776a1eb85e6c3679f86da236480140" exitCode=0 Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.022076 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" event={"ID":"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211","Type":"ContainerDied","Data":"4a530425b07693de2e4e0194ace026aefd776a1eb85e6c3679f86da236480140"} Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.024731 3552 generic.go:334] "Generic (PLEG): container finished" podID="82b3f1de-b3b7-4401-bb4a-5f3d3a009041" containerID="1c2c7fe7711cd14faa9b682e958d7cdefdedad08f1a5328f09d3fc4f6c168c75" exitCode=0 Mar 20 15:50:23 crc kubenswrapper[3552]: I0320 15:50:23.026696 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" event={"ID":"82b3f1de-b3b7-4401-bb4a-5f3d3a009041","Type":"ContainerDied","Data":"1c2c7fe7711cd14faa9b682e958d7cdefdedad08f1a5328f09d3fc4f6c168c75"} Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.087843 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.088429 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-central-agent" containerID="cri-o://80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398" gracePeriod=30 Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.088522 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="proxy-httpd" containerID="cri-o://ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3" gracePeriod=30 Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.088553 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="sg-core" containerID="cri-o://3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4" gracePeriod=30 Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.088531 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-notification-agent" containerID="cri-o://e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68" gracePeriod=30 Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.804873 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.926542 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-operator-scripts\") pod \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.927534 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "82b3f1de-b3b7-4401-bb4a-5f3d3a009041" (UID: "82b3f1de-b3b7-4401-bb4a-5f3d3a009041"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.928087 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph7fw\" (UniqueName: \"kubernetes.io/projected/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-kube-api-access-ph7fw\") pod \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\" (UID: \"82b3f1de-b3b7-4401-bb4a-5f3d3a009041\") " Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.929553 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:24 crc kubenswrapper[3552]: I0320 15:50:24.938236 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-kube-api-access-ph7fw" (OuterVolumeSpecName: "kube-api-access-ph7fw") pod "82b3f1de-b3b7-4401-bb4a-5f3d3a009041" (UID: "82b3f1de-b3b7-4401-bb4a-5f3d3a009041"). InnerVolumeSpecName "kube-api-access-ph7fw". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.039876 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-ph7fw\" (UniqueName: \"kubernetes.io/projected/82b3f1de-b3b7-4401-bb4a-5f3d3a009041-kube-api-access-ph7fw\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.040603 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.086818 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.094542 3552 generic.go:334] "Generic (PLEG): container finished" podID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerID="ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3" exitCode=0 Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.094569 3552 generic.go:334] "Generic (PLEG): container finished" podID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerID="3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4" exitCode=2 Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.094580 3552 generic.go:334] "Generic (PLEG): container finished" podID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerID="e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68" exitCode=0 Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.094642 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerDied","Data":"ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3"} Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.094662 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerDied","Data":"3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4"} Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.094674 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerDied","Data":"e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68"} Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.111529 3552 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-f5d68" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.111532 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-f5d68" event={"ID":"084c549c-51f1-4d1d-92bf-1e18655e07f8","Type":"ContainerDied","Data":"65439560e122a1e64185d3aa0cb0ad51407052bfcee36ffa89c3f0f6d159f751"} Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.111651 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65439560e122a1e64185d3aa0cb0ad51407052bfcee36ffa89c3f0f6d159f751" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.141677 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.142647 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-61e5-account-create-update-k5rvg" event={"ID":"82b3f1de-b3b7-4401-bb4a-5f3d3a009041","Type":"ContainerDied","Data":"029b285616e82d0239fb0a9bdaad91741aae2946ac3c676e4a7b1813f5c74c11"} Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.142687 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="029b285616e82d0239fb0a9bdaad91741aae2946ac3c676e4a7b1813f5c74c11" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.143588 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/084c549c-51f1-4d1d-92bf-1e18655e07f8-operator-scripts\") pod \"084c549c-51f1-4d1d-92bf-1e18655e07f8\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.143769 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8jlc\" (UniqueName: \"kubernetes.io/projected/084c549c-51f1-4d1d-92bf-1e18655e07f8-kube-api-access-j8jlc\") pod \"084c549c-51f1-4d1d-92bf-1e18655e07f8\" (UID: \"084c549c-51f1-4d1d-92bf-1e18655e07f8\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.148510 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/084c549c-51f1-4d1d-92bf-1e18655e07f8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "084c549c-51f1-4d1d-92bf-1e18655e07f8" (UID: "084c549c-51f1-4d1d-92bf-1e18655e07f8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.153378 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-zvnmw" event={"ID":"4103ddf5-a1fa-417e-8948-6f54835b9ff2","Type":"ContainerDied","Data":"81322acc0996bef8d6fb50d8d7f2b9236d33abf1765bfc5b9421f3da18d0bb3e"} Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.153472 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81322acc0996bef8d6fb50d8d7f2b9236d33abf1765bfc5b9421f3da18d0bb3e" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.155011 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-zvnmw" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.158577 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/084c549c-51f1-4d1d-92bf-1e18655e07f8-kube-api-access-j8jlc" (OuterVolumeSpecName: "kube-api-access-j8jlc") pod "084c549c-51f1-4d1d-92bf-1e18655e07f8" (UID: "084c549c-51f1-4d1d-92bf-1e18655e07f8"). InnerVolumeSpecName "kube-api-access-j8jlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.185018 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.185793 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.232948 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.250196 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5488\" (UniqueName: \"kubernetes.io/projected/4103ddf5-a1fa-417e-8948-6f54835b9ff2-kube-api-access-c5488\") pod \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.250519 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4103ddf5-a1fa-417e-8948-6f54835b9ff2-operator-scripts\") pod \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\" (UID: \"4103ddf5-a1fa-417e-8948-6f54835b9ff2\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.250988 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-j8jlc\" (UniqueName: \"kubernetes.io/projected/084c549c-51f1-4d1d-92bf-1e18655e07f8-kube-api-access-j8jlc\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.251015 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/084c549c-51f1-4d1d-92bf-1e18655e07f8-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.251762 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4103ddf5-a1fa-417e-8948-6f54835b9ff2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4103ddf5-a1fa-417e-8948-6f54835b9ff2" (UID: "4103ddf5-a1fa-417e-8948-6f54835b9ff2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.257927 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4103ddf5-a1fa-417e-8948-6f54835b9ff2-kube-api-access-c5488" (OuterVolumeSpecName: "kube-api-access-c5488") pod "4103ddf5-a1fa-417e-8948-6f54835b9ff2" (UID: "4103ddf5-a1fa-417e-8948-6f54835b9ff2"). InnerVolumeSpecName "kube-api-access-c5488". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.356567 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7vnf\" (UniqueName: \"kubernetes.io/projected/b6543e96-134c-4d66-8410-a2f517b9d896-kube-api-access-k7vnf\") pod \"b6543e96-134c-4d66-8410-a2f517b9d896\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.356917 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hznf4\" (UniqueName: \"kubernetes.io/projected/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-kube-api-access-hznf4\") pod \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.357042 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmlv9\" (UniqueName: \"kubernetes.io/projected/8a1a135a-ae66-4349-80ed-bef49b8632e4-kube-api-access-wmlv9\") pod \"8a1a135a-ae66-4349-80ed-bef49b8632e4\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.357093 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6543e96-134c-4d66-8410-a2f517b9d896-operator-scripts\") pod \"b6543e96-134c-4d66-8410-a2f517b9d896\" (UID: \"b6543e96-134c-4d66-8410-a2f517b9d896\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.357157 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a1a135a-ae66-4349-80ed-bef49b8632e4-operator-scripts\") pod \"8a1a135a-ae66-4349-80ed-bef49b8632e4\" (UID: \"8a1a135a-ae66-4349-80ed-bef49b8632e4\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.357805 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6543e96-134c-4d66-8410-a2f517b9d896-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6543e96-134c-4d66-8410-a2f517b9d896" (UID: "b6543e96-134c-4d66-8410-a2f517b9d896"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.358012 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-operator-scripts\") pod \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\" (UID: \"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.358063 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a1a135a-ae66-4349-80ed-bef49b8632e4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a1a135a-ae66-4349-80ed-bef49b8632e4" (UID: "8a1a135a-ae66-4349-80ed-bef49b8632e4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.358763 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" (UID: "6bc966f8-44aa-4dd7-bbd6-4ce4edb35211"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.362429 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a1a135a-ae66-4349-80ed-bef49b8632e4-kube-api-access-wmlv9" (OuterVolumeSpecName: "kube-api-access-wmlv9") pod "8a1a135a-ae66-4349-80ed-bef49b8632e4" (UID: "8a1a135a-ae66-4349-80ed-bef49b8632e4"). InnerVolumeSpecName "kube-api-access-wmlv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.364026 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.364064 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-c5488\" (UniqueName: \"kubernetes.io/projected/4103ddf5-a1fa-417e-8948-6f54835b9ff2-kube-api-access-c5488\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.364094 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6543e96-134c-4d66-8410-a2f517b9d896-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.364109 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a1a135a-ae66-4349-80ed-bef49b8632e4-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.364122 3552 reconciler_common.go:300] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4103ddf5-a1fa-417e-8948-6f54835b9ff2-operator-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.364483 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6543e96-134c-4d66-8410-a2f517b9d896-kube-api-access-k7vnf" (OuterVolumeSpecName: "kube-api-access-k7vnf") pod "b6543e96-134c-4d66-8410-a2f517b9d896" (UID: "b6543e96-134c-4d66-8410-a2f517b9d896"). InnerVolumeSpecName "kube-api-access-k7vnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.372502 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-kube-api-access-hznf4" (OuterVolumeSpecName: "kube-api-access-hznf4") pod "6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" (UID: "6bc966f8-44aa-4dd7-bbd6-4ce4edb35211"). InnerVolumeSpecName "kube-api-access-hznf4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.473934 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-k7vnf\" (UniqueName: \"kubernetes.io/projected/b6543e96-134c-4d66-8410-a2f517b9d896-kube-api-access-k7vnf\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.473968 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hznf4\" (UniqueName: \"kubernetes.io/projected/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211-kube-api-access-hznf4\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.473980 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wmlv9\" (UniqueName: \"kubernetes.io/projected/8a1a135a-ae66-4349-80ed-bef49b8632e4-kube-api-access-wmlv9\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.614439 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779011 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-run-httpd\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779116 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-log-httpd\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779161 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-scripts\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779213 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-sg-core-conf-yaml\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779268 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-config-data\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779307 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-combined-ca-bundle\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.779382 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxxsm\" (UniqueName: \"kubernetes.io/projected/8c38cad6-beb6-473e-aafc-d4912c1a4072-kube-api-access-xxxsm\") pod \"8c38cad6-beb6-473e-aafc-d4912c1a4072\" (UID: \"8c38cad6-beb6-473e-aafc-d4912c1a4072\") " Mar 20 15:50:25 crc 
kubenswrapper[3552]: I0320 15:50:25.780113 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.780283 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.780583 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.780606 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c38cad6-beb6-473e-aafc-d4912c1a4072-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.787889 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c38cad6-beb6-473e-aafc-d4912c1a4072-kube-api-access-xxxsm" (OuterVolumeSpecName: "kube-api-access-xxxsm") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "kube-api-access-xxxsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.790086 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-scripts" (OuterVolumeSpecName: "scripts") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.838523 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.882156 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.882196 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-xxxsm\" (UniqueName: \"kubernetes.io/projected/8c38cad6-beb6-473e-aafc-d4912c1a4072-kube-api-access-xxxsm\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.882210 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.900303 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-config-data" (OuterVolumeSpecName: "config-data") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.917796 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c38cad6-beb6-473e-aafc-d4912c1a4072" (UID: "8c38cad6-beb6-473e-aafc-d4912c1a4072"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.983712 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:25 crc kubenswrapper[3552]: I0320 15:50:25.983760 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c38cad6-beb6-473e-aafc-d4912c1a4072-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.087438 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2bkk6" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" probeResult="failure" output=< Mar 20 15:50:26 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 15:50:26 crc kubenswrapper[3552]: > Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.162207 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-adfe-account-create-update-7t7fl" event={"ID":"b6543e96-134c-4d66-8410-a2f517b9d896","Type":"ContainerDied","Data":"a3b22a27128877c452fcc160c26597b78a1491482799911d65a7cadd27f1e20d"} Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.162254 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3b22a27128877c452fcc160c26597b78a1491482799911d65a7cadd27f1e20d" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.162766 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-adfe-account-create-update-7t7fl" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.163905 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" event={"ID":"6bc966f8-44aa-4dd7-bbd6-4ce4edb35211","Type":"ContainerDied","Data":"ef40d7fb325b41fe0357d5300bb8bc3e285decc51a74e3440d80da76ea2c1a1f"} Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.163942 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef40d7fb325b41fe0357d5300bb8bc3e285decc51a74e3440d80da76ea2c1a1f" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.163966 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-aae5-account-create-update-xncvz" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.166147 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-lbvs5" event={"ID":"8a1a135a-ae66-4349-80ed-bef49b8632e4","Type":"ContainerDied","Data":"96238862b36dde7133eacb1335fa5cc66f7f1fc7520d31cec73dcd232a26c56e"} Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.166185 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96238862b36dde7133eacb1335fa5cc66f7f1fc7520d31cec73dcd232a26c56e" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.166263 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-lbvs5" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.171676 3552 generic.go:334] "Generic (PLEG): container finished" podID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerID="80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398" exitCode=0 Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.171714 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerDied","Data":"80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398"} Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.171734 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c38cad6-beb6-473e-aafc-d4912c1a4072","Type":"ContainerDied","Data":"53814e25e4466959e71527d585284a984c78a04390bdf00bc6ccda9f16db5e7e"} Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.171751 3552 scope.go:117] "RemoveContainer" containerID="ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.171927 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.227515 3552 scope.go:117] "RemoveContainer" containerID="3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.297572 3552 scope.go:117] "RemoveContainer" containerID="e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.311297 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.322530 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.344618 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.344831 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345129 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="sg-core" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345145 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="sg-core" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345159 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b6543e96-134c-4d66-8410-a2f517b9d896" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345166 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6543e96-134c-4d66-8410-a2f517b9d896" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345187 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="084c549c-51f1-4d1d-92bf-1e18655e07f8" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345193 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="084c549c-51f1-4d1d-92bf-1e18655e07f8" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345204 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-notification-agent" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345211 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-notification-agent" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345219 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345227 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345241 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="proxy-httpd" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345247 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="proxy-httpd" Mar 20 15:50:26 crc 
kubenswrapper[3552]: E0320 15:50:26.345256 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8a1a135a-ae66-4349-80ed-bef49b8632e4" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345262 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a1a135a-ae66-4349-80ed-bef49b8632e4" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345273 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4103ddf5-a1fa-417e-8948-6f54835b9ff2" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345279 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4103ddf5-a1fa-417e-8948-6f54835b9ff2" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345291 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-central-agent" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345298 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-central-agent" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.345313 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="82b3f1de-b3b7-4401-bb4a-5f3d3a009041" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345319 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b3f1de-b3b7-4401-bb4a-5f3d3a009041" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345535 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-central-agent" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345550 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="ceilometer-notification-agent" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345563 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345573 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b3f1de-b3b7-4401-bb4a-5f3d3a009041" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345584 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="proxy-httpd" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345598 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" containerName="sg-core" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345610 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4103ddf5-a1fa-417e-8948-6f54835b9ff2" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345620 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a1a135a-ae66-4349-80ed-bef49b8632e4" containerName="mariadb-database-create" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.345628 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="084c549c-51f1-4d1d-92bf-1e18655e07f8" containerName="mariadb-database-create" Mar 20 15:50:26 
crc kubenswrapper[3552]: I0320 15:50:26.345654 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6543e96-134c-4d66-8410-a2f517b9d896" containerName="mariadb-account-create-update" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.348359 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.363237 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.363509 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.366818 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.372618 3552 scope.go:117] "RemoveContainer" containerID="80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.414763 3552 scope.go:117] "RemoveContainer" containerID="ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.418869 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3\": container with ID starting with ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3 not found: ID does not exist" containerID="ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.418917 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3"} err="failed to get container status \"ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3\": rpc error: code = NotFound desc = could not find container \"ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3\": container with ID starting with ca03eea4b6aa3297a43569025f906f74cd66140c870bfb7017447446492d40b3 not found: ID does not exist" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.418930 3552 scope.go:117] "RemoveContainer" containerID="3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4" Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.419594 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4\": container with ID starting with 3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4 not found: ID does not exist" containerID="3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.419738 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4"} err="failed to get container status \"3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4\": rpc error: code = NotFound desc = could not find container \"3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4\": container with ID starting with 3b8bac7b50238f5af44da4bf8574fcf201fb387d93dc379d6c6d4278b363c5a4 not found: ID does not exist" Mar 20 15:50:26 crc 
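[Editor's note] The "RemoveContainer" / "ContainerStatus from runtime service failed ... NotFound" pairs around here are the kubelet retrying deletion of containers CRI-O has already garbage-collected; the runtime answers the status query with gRPC NotFound, which is benign. A minimal sketch of how a CRI client distinguishes that case, assuming an already-connected RuntimeServiceClient from k8s.io/cri-api (helper name is illustrative):

    package crisketch

    import (
        "context"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
        runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    )

    // containerGone asks the runtime for the container's status and treats a
    // gRPC NotFound as "already removed" -- the same condition logged above
    // as "DeleteContainer returned error ... ID does not exist".
    func containerGone(ctx context.Context, rt runtimeapi.RuntimeServiceClient, id string) (bool, error) {
        _, err := rt.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{ContainerId: id})
        if status.Code(err) == codes.NotFound {
            return true, nil // nothing left to delete
        }
        return false, err
    }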
Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.419825 3552 scope.go:117] "RemoveContainer" containerID="e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68"
Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.420172 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68\": container with ID starting with e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68 not found: ID does not exist" containerID="e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68"
Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.420214 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68"} err="failed to get container status \"e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68\": rpc error: code = NotFound desc = could not find container \"e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68\": container with ID starting with e29747ea064fab6a76d3ebcaca26e14035c7101c0ea6702dd62df34d71422d68 not found: ID does not exist"
Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.420225 3552 scope.go:117] "RemoveContainer" containerID="80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398"
Mar 20 15:50:26 crc kubenswrapper[3552]: E0320 15:50:26.420731 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398\": container with ID starting with 80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398 not found: ID does not exist" containerID="80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398"
Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.420766 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398"} err="failed to get container status \"80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398\": rpc error: code = NotFound desc = could not find container \"80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398\": container with ID starting with 80b4fedb137956bba8059b53b170b1296078630a5ec044095a4fbf0b6c410398 not found: ID does not exist"
Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.495039 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-config-data\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0"
Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.495117 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr2zp\" (UniqueName: \"kubernetes.io/projected/e06a89d5-b404-460d-9148-68d2bd25e4c1-kube-api-access-fr2zp\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0"
\"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.495554 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-log-httpd\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.495751 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.495824 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.495871 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-run-httpd\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.597393 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-config-data\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.597785 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fr2zp\" (UniqueName: \"kubernetes.io/projected/e06a89d5-b404-460d-9148-68d2bd25e4c1-kube-api-access-fr2zp\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.597909 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-scripts\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.598051 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-log-httpd\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.598200 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.598326 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.598489 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-run-httpd\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.598830 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-log-httpd\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.599021 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-run-httpd\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.603090 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-scripts\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.603303 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.603896 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.614539 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-config-data\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.624752 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr2zp\" (UniqueName: \"kubernetes.io/projected/e06a89d5-b404-460d-9148-68d2bd25e4c1-kube-api-access-fr2zp\") pod \"ceilometer-0\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " pod="openstack/ceilometer-0" Mar 20 15:50:26 crc kubenswrapper[3552]: I0320 15:50:26.698154 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:27 crc kubenswrapper[3552]: I0320 15:50:27.166243 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:27 crc kubenswrapper[3552]: I0320 15:50:27.182285 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerStarted","Data":"170a400d69e7b2c9124e2c28c601708ad85120331743172ce031f86076110db5"} Mar 20 15:50:27 crc kubenswrapper[3552]: I0320 15:50:27.439801 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c38cad6-beb6-473e-aafc-d4912c1a4072" path="/var/lib/kubelet/pods/8c38cad6-beb6-473e-aafc-d4912c1a4072/volumes" Mar 20 15:50:28 crc kubenswrapper[3552]: I0320 15:50:28.192062 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerStarted","Data":"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6"} Mar 20 15:50:29 crc kubenswrapper[3552]: I0320 15:50:29.216181 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerStarted","Data":"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b"} Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.033573 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5sbpr"] Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.034672 3552 topology_manager.go:215] "Topology Admit Handler" podUID="23c8de82-3582-4efe-9b18-b448fd4c8776" podNamespace="openstack" podName="nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.035763 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.039297 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.039827 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-sd8ws" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.056031 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.058111 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5sbpr"] Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.212745 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-config-data\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.213152 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.213194 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-scripts\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.213280 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk6lc\" (UniqueName: \"kubernetes.io/projected/23c8de82-3582-4efe-9b18-b448fd4c8776-kube-api-access-nk6lc\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.227169 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerStarted","Data":"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1"} Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.227214 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerStarted","Data":"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96"} Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.229288 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.253504 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.6544421009999999 podStartE2EDuration="4.253458277s" podCreationTimestamp="2026-03-20 15:50:26 +0000 UTC" firstStartedPulling="2026-03-20 15:50:27.174670577 +0000 UTC 
m=+1526.868367407" lastFinishedPulling="2026-03-20 15:50:29.773686753 +0000 UTC m=+1529.467383583" observedRunningTime="2026-03-20 15:50:30.24647706 +0000 UTC m=+1529.940173900" watchObservedRunningTime="2026-03-20 15:50:30.253458277 +0000 UTC m=+1529.947155127" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.315555 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-config-data\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.315688 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.315739 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-scripts\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.315821 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nk6lc\" (UniqueName: \"kubernetes.io/projected/23c8de82-3582-4efe-9b18-b448fd4c8776-kube-api-access-nk6lc\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.323039 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-scripts\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.323254 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.324006 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-config-data\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.335869 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk6lc\" (UniqueName: \"kubernetes.io/projected/23c8de82-3582-4efe-9b18-b448fd4c8776-kube-api-access-nk6lc\") pod \"nova-cell0-conductor-db-sync-5sbpr\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:30 crc kubenswrapper[3552]: I0320 15:50:30.353935 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:31 crc kubenswrapper[3552]: I0320 15:50:31.031853 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5sbpr"] Mar 20 15:50:31 crc kubenswrapper[3552]: W0320 15:50:31.041805 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23c8de82_3582_4efe_9b18_b448fd4c8776.slice/crio-ba1ee46267c709619d3fee18002ec0c271de6745480a64af25207a8af3b48e7f WatchSource:0}: Error finding container ba1ee46267c709619d3fee18002ec0c271de6745480a64af25207a8af3b48e7f: Status 404 returned error can't find the container with id ba1ee46267c709619d3fee18002ec0c271de6745480a64af25207a8af3b48e7f Mar 20 15:50:31 crc kubenswrapper[3552]: I0320 15:50:31.248150 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" event={"ID":"23c8de82-3582-4efe-9b18-b448fd4c8776","Type":"ContainerStarted","Data":"ba1ee46267c709619d3fee18002ec0c271de6745480a64af25207a8af3b48e7f"} Mar 20 15:50:31 crc kubenswrapper[3552]: I0320 15:50:31.317099 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:50:31 crc kubenswrapper[3552]: I0320 15:50:31.440117 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:50:31 crc kubenswrapper[3552]: I0320 15:50:31.548375 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5dp9z"] Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.268139 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5dp9z" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="registry-server" containerID="cri-o://9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4" gracePeriod=2 Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.820748 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.893472 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-catalog-content\") pod \"eb12b794-eb41-45ba-9acf-0c42cada176c\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.893611 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9dpg\" (UniqueName: \"kubernetes.io/projected/eb12b794-eb41-45ba-9acf-0c42cada176c-kube-api-access-r9dpg\") pod \"eb12b794-eb41-45ba-9acf-0c42cada176c\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.893671 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-utilities\") pod \"eb12b794-eb41-45ba-9acf-0c42cada176c\" (UID: \"eb12b794-eb41-45ba-9acf-0c42cada176c\") " Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.895079 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-utilities" (OuterVolumeSpecName: "utilities") pod "eb12b794-eb41-45ba-9acf-0c42cada176c" (UID: "eb12b794-eb41-45ba-9acf-0c42cada176c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.902748 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb12b794-eb41-45ba-9acf-0c42cada176c-kube-api-access-r9dpg" (OuterVolumeSpecName: "kube-api-access-r9dpg") pod "eb12b794-eb41-45ba-9acf-0c42cada176c" (UID: "eb12b794-eb41-45ba-9acf-0c42cada176c"). InnerVolumeSpecName "kube-api-access-r9dpg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.995964 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-r9dpg\" (UniqueName: \"kubernetes.io/projected/eb12b794-eb41-45ba-9acf-0c42cada176c-kube-api-access-r9dpg\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:33 crc kubenswrapper[3552]: I0320 15:50:33.996008 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.285036 3552 generic.go:334] "Generic (PLEG): container finished" podID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerID="9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4" exitCode=0 Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.285077 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerDied","Data":"9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4"} Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.285099 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dp9z" event={"ID":"eb12b794-eb41-45ba-9acf-0c42cada176c","Type":"ContainerDied","Data":"1be282b2427dcc016b84a629dd562d6bb2360aca6cd32a5794745dbf2956e021"} Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.285119 3552 scope.go:117] "RemoveContainer" containerID="9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.285243 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5dp9z" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.330891 3552 scope.go:117] "RemoveContainer" containerID="1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.397590 3552 scope.go:117] "RemoveContainer" containerID="5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.429311 3552 scope.go:117] "RemoveContainer" containerID="9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4" Mar 20 15:50:34 crc kubenswrapper[3552]: E0320 15:50:34.430950 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4\": container with ID starting with 9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4 not found: ID does not exist" containerID="9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.430998 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4"} err="failed to get container status \"9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4\": rpc error: code = NotFound desc = could not find container \"9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4\": container with ID starting with 9cea4775729155bd37596d7a643653f96c83f922c77924f67eb8618af9861be4 not found: ID does not exist" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.431016 3552 scope.go:117] "RemoveContainer" containerID="1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87" Mar 20 15:50:34 crc kubenswrapper[3552]: E0320 15:50:34.431755 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87\": container with ID starting with 1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87 not found: ID does not exist" containerID="1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.431788 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87"} err="failed to get container status \"1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87\": rpc error: code = NotFound desc = could not find container \"1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87\": container with ID starting with 1617b25547b7b67af2e296bb359412e2b362f2a8dcd59e22314279255ee2db87 not found: ID does not exist" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.431799 3552 scope.go:117] "RemoveContainer" containerID="5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257" Mar 20 15:50:34 crc kubenswrapper[3552]: E0320 15:50:34.432090 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257\": container with ID starting with 5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257 not found: ID does not exist" 
containerID="5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.432151 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257"} err="failed to get container status \"5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257\": rpc error: code = NotFound desc = could not find container \"5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257\": container with ID starting with 5a867b20c40b68f198c4035491850a1dd46f194cb8f865e278cac00f47825257 not found: ID does not exist" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.539332 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb12b794-eb41-45ba-9acf-0c42cada176c" (UID: "eb12b794-eb41-45ba-9acf-0c42cada176c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.607631 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb12b794-eb41-45ba-9acf-0c42cada176c-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.617729 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5dp9z"] Mar 20 15:50:34 crc kubenswrapper[3552]: I0320 15:50:34.626445 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5dp9z"] Mar 20 15:50:35 crc kubenswrapper[3552]: I0320 15:50:35.039641 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:50:35 crc kubenswrapper[3552]: I0320 15:50:35.143274 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2bkk6" Mar 20 15:50:35 crc kubenswrapper[3552]: I0320 15:50:35.441318 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" path="/var/lib/kubelet/pods/eb12b794-eb41-45ba-9acf-0c42cada176c/volumes" Mar 20 15:50:35 crc kubenswrapper[3552]: I0320 15:50:35.657928 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2bkk6"] Mar 20 15:50:36 crc kubenswrapper[3552]: I0320 15:50:36.303165 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2bkk6" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" containerID="cri-o://f4174c6f20afe7a5e89a0ed16176254d9f455ae0b806478a25ff6eb301e03ec6" gracePeriod=2 Mar 20 15:50:37 crc kubenswrapper[3552]: I0320 15:50:37.323759 3552 generic.go:334] "Generic (PLEG): container finished" podID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerID="f4174c6f20afe7a5e89a0ed16176254d9f455ae0b806478a25ff6eb301e03ec6" exitCode=0 Mar 20 15:50:37 crc kubenswrapper[3552]: I0320 15:50:37.323804 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerDied","Data":"f4174c6f20afe7a5e89a0ed16176254d9f455ae0b806478a25ff6eb301e03ec6"} Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.075359 3552 util.go:48] "No 
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.075359 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2bkk6"
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.111137 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-catalog-content\") pod \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") "
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.111208 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-utilities\") pod \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") "
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.111361 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gfxt\" (UniqueName: \"kubernetes.io/projected/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-kube-api-access-7gfxt\") pod \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\" (UID: \"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876\") "
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.112776 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-utilities" (OuterVolumeSpecName: "utilities") pod "47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" (UID: "47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.117815 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-kube-api-access-7gfxt" (OuterVolumeSpecName: "kube-api-access-7gfxt") pod "47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" (UID: "47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876"). InnerVolumeSpecName "kube-api-access-7gfxt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.214634 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-utilities\") on node \"crc\" DevicePath \"\""
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.214669 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-7gfxt\" (UniqueName: \"kubernetes.io/projected/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-kube-api-access-7gfxt\") on node \"crc\" DevicePath \"\""
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.369023 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bkk6" event={"ID":"47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876","Type":"ContainerDied","Data":"f8fb98b9156b1b93e9e665053f1979871b60f954e28d0cb78069a9791cce7774"}
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.369069 3552 scope.go:117] "RemoveContainer" containerID="f4174c6f20afe7a5e89a0ed16176254d9f455ae0b806478a25ff6eb301e03ec6"
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.369216 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2bkk6"
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.372812 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" event={"ID":"23c8de82-3582-4efe-9b18-b448fd4c8776","Type":"ContainerStarted","Data":"23e82a487853058a5b68c6d0737232c3cb0ff11505f6c0e3255ae6020f901a15"}
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.428969 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" podStartSLOduration=1.646798395 podStartE2EDuration="10.428889211s" podCreationTimestamp="2026-03-20 15:50:30 +0000 UTC" firstStartedPulling="2026-03-20 15:50:31.045718896 +0000 UTC m=+1530.739415726" lastFinishedPulling="2026-03-20 15:50:39.827809692 +0000 UTC m=+1539.521506542" observedRunningTime="2026-03-20 15:50:40.420767655 +0000 UTC m=+1540.114464485" watchObservedRunningTime="2026-03-20 15:50:40.428889211 +0000 UTC m=+1540.122586041"
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.512657 3552 scope.go:117] "RemoveContainer" containerID="fa7d1fdea4c505d6227d252bd086fcdefa3963ac1c01396e743a4ac49b423e7a"
Mar 20 15:50:40 crc kubenswrapper[3552]: I0320 15:50:40.571547 3552 scope.go:117] "RemoveContainer" containerID="79e1658cd1b2219496a780218a3274b5a548e188b7dda7130a5ef80fc1d1edd0"
Mar 20 15:50:41 crc kubenswrapper[3552]: I0320 15:50:41.098212 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" (UID: "47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:50:41 crc kubenswrapper[3552]: I0320 15:50:41.133517 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 20 15:50:41 crc kubenswrapper[3552]: I0320 15:50:41.308391 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2bkk6"]
Mar 20 15:50:41 crc kubenswrapper[3552]: I0320 15:50:41.318070 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2bkk6"]
Mar 20 15:50:41 crc kubenswrapper[3552]: I0320 15:50:41.450945 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" path="/var/lib/kubelet/pods/47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876/volumes"
Mar 20 15:50:43 crc kubenswrapper[3552]: I0320 15:50:43.954340 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:50:43 crc kubenswrapper[3552]: I0320 15:50:43.955804 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-central-agent" containerID="cri-o://bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6" gracePeriod=30
Mar 20 15:50:43 crc kubenswrapper[3552]: I0320 15:50:43.955924 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="sg-core" containerID="cri-o://3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96" gracePeriod=30
Mar 20 15:50:43 crc kubenswrapper[3552]: I0320 15:50:43.955951 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="proxy-httpd" containerID="cri-o://dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1" gracePeriod=30
Mar 20 15:50:43 crc kubenswrapper[3552]: I0320 15:50:43.955957 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-notification-agent" containerID="cri-o://1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b" gracePeriod=30
Mar 20 15:50:43 crc kubenswrapper[3552]: I0320 15:50:43.968730 3552 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.206:3000/\": EOF"
Mar 20 15:50:44 crc kubenswrapper[3552]: I0320 15:50:44.410946 3552 generic.go:334] "Generic (PLEG): container finished" podID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerID="dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1" exitCode=0
Mar 20 15:50:44 crc kubenswrapper[3552]: I0320 15:50:44.410980 3552 generic.go:334] "Generic (PLEG): container finished" podID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerID="3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96" exitCode=2
Mar 20 15:50:44 crc kubenswrapper[3552]: I0320 15:50:44.411004 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerDied","Data":"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1"}
Mar 20 15:50:44 crc kubenswrapper[3552]: I0320 15:50:44.411025 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerDied","Data":"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96"}
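[Editor's note] The readiness failure against http://10.217.0.206:3000/ with EOF just above is expected while proxy-httpd is shutting down under its 30s grace period: the kubelet's HTTP prober is, in essence, a GET with a short timeout, and a connection closed mid-handshake surfaces as EOF. A hedged sketch of that kind of check (the real prober adds headers, redirect policy, and body-size limits; URL and timeout here are assumptions):

    package probesketch

    import (
        "fmt"
        "net/http"
        "time"
    )

    // httpReady performs the kind of GET the readiness probe issues; an EOF
    // during pod shutdown surfaces exactly as the "Probe failed ... EOF"
    // record above.
    func httpReady(url string) error {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. Get "http://10.217.0.206:3000/": EOF
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("probe failed with status %d", resp.StatusCode)
        }
        return nil
    }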
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032333 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-run-httpd\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032415 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fr2zp\" (UniqueName: \"kubernetes.io/projected/e06a89d5-b404-460d-9148-68d2bd25e4c1-kube-api-access-fr2zp\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032508 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-scripts\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032566 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-combined-ca-bundle\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032657 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-sg-core-conf-yaml\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032704 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-log-httpd\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.032754 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-config-data\") pod \"e06a89d5-b404-460d-9148-68d2bd25e4c1\" (UID: \"e06a89d5-b404-460d-9148-68d2bd25e4c1\") " Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.034564 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.035103 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.039951 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e06a89d5-b404-460d-9148-68d2bd25e4c1-kube-api-access-fr2zp" (OuterVolumeSpecName: "kube-api-access-fr2zp") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "kube-api-access-fr2zp". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.044045 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-scripts" (OuterVolumeSpecName: "scripts") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.068825 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.132303 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.134838 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.134934 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.134956 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e06a89d5-b404-460d-9148-68d2bd25e4c1-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.134979 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-fr2zp\" (UniqueName: \"kubernetes.io/projected/e06a89d5-b404-460d-9148-68d2bd25e4c1-kube-api-access-fr2zp\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.134996 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.135014 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.167871 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-config-data" (OuterVolumeSpecName: "config-data") pod "e06a89d5-b404-460d-9148-68d2bd25e4c1" (UID: "e06a89d5-b404-460d-9148-68d2bd25e4c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.236689 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06a89d5-b404-460d-9148-68d2bd25e4c1-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428072 3552 generic.go:334] "Generic (PLEG): container finished" podID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerID="1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b" exitCode=0 Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428111 3552 generic.go:334] "Generic (PLEG): container finished" podID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerID="bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6" exitCode=0 Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428138 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerDied","Data":"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b"} Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428163 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerDied","Data":"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6"} Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428175 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e06a89d5-b404-460d-9148-68d2bd25e4c1","Type":"ContainerDied","Data":"170a400d69e7b2c9124e2c28c601708ad85120331743172ce031f86076110db5"} Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428198 3552 scope.go:117] "RemoveContainer" containerID="dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.428381 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.561286 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.561300 3552 scope.go:117] "RemoveContainer" containerID="3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.571986 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.590927 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591124 3552 topology_manager.go:215] "Topology Admit Handler" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591380 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="registry-server" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591396 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="registry-server" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591446 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="extract-content" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591455 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="extract-content" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591465 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-notification-agent" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591472 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-notification-agent" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591485 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-central-agent" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591491 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-central-agent" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591504 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="extract-content" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591510 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="extract-content" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591526 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="extract-utilities" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591533 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="extract-utilities" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591544 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" Mar 20 15:50:45 crc 
kubenswrapper[3552]: I0320 15:50:45.591551 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591560 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="sg-core" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591568 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="sg-core" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591588 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="extract-utilities" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591595 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="extract-utilities" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.591603 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="proxy-httpd" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591609 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="proxy-httpd" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591804 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-notification-agent" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591818 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="47cc8d9e-e8fe-4998-93a7-0b1c2bfdc876" containerName="registry-server" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591832 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="ceilometer-central-agent" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591842 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="proxy-httpd" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591852 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" containerName="sg-core" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.591874 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb12b794-eb41-45ba-9acf-0c42cada176c" containerName="registry-server" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.593767 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.595861 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.601783 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.602933 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.638488 3552 scope.go:117] "RemoveContainer" containerID="1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.667821 3552 scope.go:117] "RemoveContainer" containerID="bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.696370 3552 scope.go:117] "RemoveContainer" containerID="dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.696921 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1\": container with ID starting with dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1 not found: ID does not exist" containerID="dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.697010 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1"} err="failed to get container status \"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1\": rpc error: code = NotFound desc = could not find container \"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1\": container with ID starting with dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1 not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.697035 3552 scope.go:117] "RemoveContainer" containerID="3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.697512 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96\": container with ID starting with 3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96 not found: ID does not exist" containerID="3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.697566 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96"} err="failed to get container status \"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96\": rpc error: code = NotFound desc = could not find container \"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96\": container with ID starting with 3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96 not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.697578 3552 scope.go:117] "RemoveContainer" 
containerID="1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.697862 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b\": container with ID starting with 1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b not found: ID does not exist" containerID="1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.697896 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b"} err="failed to get container status \"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b\": rpc error: code = NotFound desc = could not find container \"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b\": container with ID starting with 1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.697908 3552 scope.go:117] "RemoveContainer" containerID="bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6" Mar 20 15:50:45 crc kubenswrapper[3552]: E0320 15:50:45.698177 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6\": container with ID starting with bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6 not found: ID does not exist" containerID="bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.698208 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6"} err="failed to get container status \"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6\": rpc error: code = NotFound desc = could not find container \"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6\": container with ID starting with bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6 not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.698221 3552 scope.go:117] "RemoveContainer" containerID="dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.698515 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1"} err="failed to get container status \"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1\": rpc error: code = NotFound desc = could not find container \"dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1\": container with ID starting with dab52010b040ac48a0cb496839eb863883eaef56ad19c1d9937b6cde5fc9d7a1 not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.698555 3552 scope.go:117] "RemoveContainer" containerID="3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.698832 3552 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96"} err="failed to get container status \"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96\": rpc error: code = NotFound desc = could not find container \"3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96\": container with ID starting with 3c5743d59450a69720697013b9f2134cd8b3b4aa3ae56170c8605f3c0eadfc96 not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.698848 3552 scope.go:117] "RemoveContainer" containerID="1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.699037 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b"} err="failed to get container status \"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b\": rpc error: code = NotFound desc = could not find container \"1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b\": container with ID starting with 1290a6e47c77b9f817954192386b0f7ebeca6fbc7316d38ec17cc8e32a03a20b not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.699061 3552 scope.go:117] "RemoveContainer" containerID="bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.699466 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6"} err="failed to get container status \"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6\": rpc error: code = NotFound desc = could not find container \"bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6\": container with ID starting with bf08e47157f45620571457afde9d96b75653ce46ad695a8b1547057829fc91b6 not found: ID does not exist" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.766187 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-scripts\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.766273 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-log-httpd\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.766622 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm5xg\" (UniqueName: \"kubernetes.io/projected/02ab53ee-11af-4c70-8156-a7a5e378cd40-kube-api-access-hm5xg\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.766759 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 
15:50:45.767093 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-config-data\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.767488 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.767712 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-run-httpd\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.869959 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870067 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-config-data\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870118 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870165 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-run-httpd\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870228 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-scripts\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870256 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-log-httpd\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870296 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hm5xg\" (UniqueName: \"kubernetes.io/projected/02ab53ee-11af-4c70-8156-a7a5e378cd40-kube-api-access-hm5xg\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 
15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.870869 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-log-httpd\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.872515 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-run-httpd\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.877157 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-config-data\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.879274 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-scripts\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.885285 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.889367 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.893163 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm5xg\" (UniqueName: \"kubernetes.io/projected/02ab53ee-11af-4c70-8156-a7a5e378cd40-kube-api-access-hm5xg\") pod \"ceilometer-0\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") " pod="openstack/ceilometer-0" Mar 20 15:50:45 crc kubenswrapper[3552]: I0320 15:50:45.911082 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:50:46 crc kubenswrapper[3552]: I0320 15:50:46.406665 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:50:46 crc kubenswrapper[3552]: W0320 15:50:46.407590 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02ab53ee_11af_4c70_8156_a7a5e378cd40.slice/crio-a4c099e7bcf3fa643b4ebfe9c66d05d3084406816a01b9ad83d4955c672eba8d WatchSource:0}: Error finding container a4c099e7bcf3fa643b4ebfe9c66d05d3084406816a01b9ad83d4955c672eba8d: Status 404 returned error can't find the container with id a4c099e7bcf3fa643b4ebfe9c66d05d3084406816a01b9ad83d4955c672eba8d Mar 20 15:50:46 crc kubenswrapper[3552]: I0320 15:50:46.438502 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerStarted","Data":"a4c099e7bcf3fa643b4ebfe9c66d05d3084406816a01b9ad83d4955c672eba8d"} Mar 20 15:50:47 crc kubenswrapper[3552]: I0320 15:50:47.446352 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e06a89d5-b404-460d-9148-68d2bd25e4c1" path="/var/lib/kubelet/pods/e06a89d5-b404-460d-9148-68d2bd25e4c1/volumes" Mar 20 15:50:47 crc kubenswrapper[3552]: I0320 15:50:47.462181 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerStarted","Data":"d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3"} Mar 20 15:50:48 crc kubenswrapper[3552]: I0320 15:50:48.472985 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerStarted","Data":"272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4"} Mar 20 15:50:48 crc kubenswrapper[3552]: I0320 15:50:48.473706 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerStarted","Data":"f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f"} Mar 20 15:50:49 crc kubenswrapper[3552]: I0320 15:50:49.486715 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerStarted","Data":"b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b"} Mar 20 15:50:49 crc kubenswrapper[3552]: I0320 15:50:49.492122 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:50:49 crc kubenswrapper[3552]: I0320 15:50:49.522440 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.978281973 podStartE2EDuration="4.522380872s" podCreationTimestamp="2026-03-20 15:50:45 +0000 UTC" firstStartedPulling="2026-03-20 15:50:46.409787412 +0000 UTC m=+1546.103484242" lastFinishedPulling="2026-03-20 15:50:48.953886311 +0000 UTC m=+1548.647583141" observedRunningTime="2026-03-20 15:50:49.516058581 +0000 UTC m=+1549.209755411" watchObservedRunningTime="2026-03-20 15:50:49.522380872 +0000 UTC m=+1549.216077702" Mar 20 15:50:51 crc kubenswrapper[3552]: E0320 15:50:51.260339 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23c8de82_3582_4efe_9b18_b448fd4c8776.slice/crio-23e82a487853058a5b68c6d0737232c3cb0ff11505f6c0e3255ae6020f901a15.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23c8de82_3582_4efe_9b18_b448fd4c8776.slice/crio-conmon-23e82a487853058a5b68c6d0737232c3cb0ff11505f6c0e3255ae6020f901a15.scope\": RecentStats: unable to find data in memory cache]" Mar 20 15:50:51 crc kubenswrapper[3552]: I0320 15:50:51.512291 3552 generic.go:334] "Generic (PLEG): container finished" podID="23c8de82-3582-4efe-9b18-b448fd4c8776" containerID="23e82a487853058a5b68c6d0737232c3cb0ff11505f6c0e3255ae6020f901a15" exitCode=0 Mar 20 15:50:51 crc kubenswrapper[3552]: I0320 15:50:51.512614 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" event={"ID":"23c8de82-3582-4efe-9b18-b448fd4c8776","Type":"ContainerDied","Data":"23e82a487853058a5b68c6d0737232c3cb0ff11505f6c0e3255ae6020f901a15"} Mar 20 15:50:52 crc kubenswrapper[3552]: I0320 15:50:52.882102 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.023708 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-scripts\") pod \"23c8de82-3582-4efe-9b18-b448fd4c8776\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.023756 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-combined-ca-bundle\") pod \"23c8de82-3582-4efe-9b18-b448fd4c8776\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.023795 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-config-data\") pod \"23c8de82-3582-4efe-9b18-b448fd4c8776\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.023961 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk6lc\" (UniqueName: \"kubernetes.io/projected/23c8de82-3582-4efe-9b18-b448fd4c8776-kube-api-access-nk6lc\") pod \"23c8de82-3582-4efe-9b18-b448fd4c8776\" (UID: \"23c8de82-3582-4efe-9b18-b448fd4c8776\") " Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.029446 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-scripts" (OuterVolumeSpecName: "scripts") pod "23c8de82-3582-4efe-9b18-b448fd4c8776" (UID: "23c8de82-3582-4efe-9b18-b448fd4c8776"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.031480 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23c8de82-3582-4efe-9b18-b448fd4c8776-kube-api-access-nk6lc" (OuterVolumeSpecName: "kube-api-access-nk6lc") pod "23c8de82-3582-4efe-9b18-b448fd4c8776" (UID: "23c8de82-3582-4efe-9b18-b448fd4c8776"). InnerVolumeSpecName "kube-api-access-nk6lc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.050663 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-config-data" (OuterVolumeSpecName: "config-data") pod "23c8de82-3582-4efe-9b18-b448fd4c8776" (UID: "23c8de82-3582-4efe-9b18-b448fd4c8776"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.052011 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23c8de82-3582-4efe-9b18-b448fd4c8776" (UID: "23c8de82-3582-4efe-9b18-b448fd4c8776"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.126441 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nk6lc\" (UniqueName: \"kubernetes.io/projected/23c8de82-3582-4efe-9b18-b448fd4c8776-kube-api-access-nk6lc\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.126480 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.126494 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.126504 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23c8de82-3582-4efe-9b18-b448fd4c8776-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.540502 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" event={"ID":"23c8de82-3582-4efe-9b18-b448fd4c8776","Type":"ContainerDied","Data":"ba1ee46267c709619d3fee18002ec0c271de6745480a64af25207a8af3b48e7f"} Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.540542 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba1ee46267c709619d3fee18002ec0c271de6745480a64af25207a8af3b48e7f" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.540601 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5sbpr" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.647503 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.647726 3552 topology_manager.go:215] "Topology Admit Handler" podUID="51c26f37-ba5c-4900-832b-1fdf603b5b62" podNamespace="openstack" podName="nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: E0320 15:50:53.648093 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="23c8de82-3582-4efe-9b18-b448fd4c8776" containerName="nova-cell0-conductor-db-sync" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.648116 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="23c8de82-3582-4efe-9b18-b448fd4c8776" containerName="nova-cell0-conductor-db-sync" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.648368 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="23c8de82-3582-4efe-9b18-b448fd4c8776" containerName="nova-cell0-conductor-db-sync" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.649168 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.652321 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.653771 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-sd8ws" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.658157 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.839671 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51c26f37-ba5c-4900-832b-1fdf603b5b62-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.839734 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csg5d\" (UniqueName: \"kubernetes.io/projected/51c26f37-ba5c-4900-832b-1fdf603b5b62-kube-api-access-csg5d\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.839914 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51c26f37-ba5c-4900-832b-1fdf603b5b62-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.941672 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51c26f37-ba5c-4900-832b-1fdf603b5b62-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.941812 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/51c26f37-ba5c-4900-832b-1fdf603b5b62-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.941851 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-csg5d\" (UniqueName: \"kubernetes.io/projected/51c26f37-ba5c-4900-832b-1fdf603b5b62-kube-api-access-csg5d\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.946903 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51c26f37-ba5c-4900-832b-1fdf603b5b62-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.946994 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51c26f37-ba5c-4900-832b-1fdf603b5b62-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.972467 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-csg5d\" (UniqueName: \"kubernetes.io/projected/51c26f37-ba5c-4900-832b-1fdf603b5b62-kube-api-access-csg5d\") pod \"nova-cell0-conductor-0\" (UID: \"51c26f37-ba5c-4900-832b-1fdf603b5b62\") " pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:53 crc kubenswrapper[3552]: I0320 15:50:53.977618 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Mar 20 15:50:54 crc kubenswrapper[3552]: I0320 15:50:54.470686 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Mar 20 15:50:54 crc kubenswrapper[3552]: I0320 15:50:54.554260 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"51c26f37-ba5c-4900-832b-1fdf603b5b62","Type":"ContainerStarted","Data":"8398972b2ecc25f6b50b41ec4a5b8d7121d0a6171125ebd7a941d83571c012b5"} Mar 20 15:50:55 crc kubenswrapper[3552]: I0320 15:50:55.564255 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"51c26f37-ba5c-4900-832b-1fdf603b5b62","Type":"ContainerStarted","Data":"a43bbcc05a24edd31a53b794029add73f8c175bb14760147b0f61495df7ec658"} Mar 20 15:50:55 crc kubenswrapper[3552]: I0320 15:50:55.590358 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.5903159650000003 podStartE2EDuration="2.590315965s" podCreationTimestamp="2026-03-20 15:50:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:50:55.582327122 +0000 UTC m=+1555.276023952" watchObservedRunningTime="2026-03-20 15:50:55.590315965 +0000 UTC m=+1555.284012795" Mar 20 15:50:56 crc kubenswrapper[3552]: I0320 15:50:56.571432 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Mar 20 15:51:01 crc kubenswrapper[3552]: I0320 15:51:01.316612 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:51:01 crc kubenswrapper[3552]: I0320 15:51:01.317524 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:51:01 crc kubenswrapper[3552]: I0320 15:51:01.317659 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:51:01 crc kubenswrapper[3552]: I0320 15:51:01.317734 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:51:01 crc kubenswrapper[3552]: I0320 15:51:01.317969 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:51:03 crc kubenswrapper[3552]: I0320 15:51:03.904215 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gl4mx"] Mar 20 15:51:03 crc kubenswrapper[3552]: I0320 15:51:03.904851 3552 topology_manager.go:215] "Topology Admit Handler" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" podNamespace="openshift-marketplace" podName="redhat-marketplace-gl4mx" Mar 20 15:51:03 crc kubenswrapper[3552]: I0320 15:51:03.906808 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:03 crc kubenswrapper[3552]: I0320 15:51:03.917994 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl4mx"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.024968 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.098410 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-utilities\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.099230 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-catalog-content\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.099714 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7vn2\" (UniqueName: \"kubernetes.io/projected/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-kube-api-access-j7vn2\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.202352 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j7vn2\" (UniqueName: \"kubernetes.io/projected/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-kube-api-access-j7vn2\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.202679 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-utilities\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.202768 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-catalog-content\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.203233 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-utilities\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.203311 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-catalog-content\") pod \"redhat-marketplace-gl4mx\" (UID: 
\"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.221308 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7vn2\" (UniqueName: \"kubernetes.io/projected/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-kube-api-access-j7vn2\") pod \"redhat-marketplace-gl4mx\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.226563 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.582948 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-w7mf5"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.583528 3552 topology_manager.go:215] "Topology Admit Handler" podUID="773e318d-c001-447d-99e0-c351b3782d0d" podNamespace="openstack" podName="nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.584747 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.591930 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.592167 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.629459 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-w7mf5"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.712716 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-scripts\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.712865 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.713429 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-config-data\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.713487 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cwfl\" (UniqueName: \"kubernetes.io/projected/773e318d-c001-447d-99e0-c351b3782d0d-kube-api-access-8cwfl\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.781991 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Mar 
20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.782184 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" podNamespace="openstack" podName="nova-api-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.783961 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.788565 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.790615 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.790851 3552 topology_manager.go:215] "Topology Admit Handler" podUID="69a966a0-89a7-41a6-816e-83a53f7260b4" podNamespace="openstack" podName="nova-cell1-novncproxy-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.792016 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.799862 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.806282 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.814844 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.814945 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-config-data\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.814973 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-8cwfl\" (UniqueName: \"kubernetes.io/projected/773e318d-c001-447d-99e0-c351b3782d0d-kube-api-access-8cwfl\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.815022 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-scripts\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.820187 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl4mx"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.822152 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-scripts\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc 
kubenswrapper[3552]: I0320 15:51:04.827733 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.844067 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-config-data\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.852774 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cwfl\" (UniqueName: \"kubernetes.io/projected/773e318d-c001-447d-99e0-c351b3782d0d-kube-api-access-8cwfl\") pod \"nova-cell0-cell-mapping-w7mf5\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") " pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.860492 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.913004 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-w7mf5" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932435 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfgr7\" (UniqueName: \"kubernetes.io/projected/69a966a0-89a7-41a6-816e-83a53f7260b4-kube-api-access-kfgr7\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932476 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-logs\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932512 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-config-data\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932593 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932636 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932658 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.932675 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmjz8\" (UniqueName: \"kubernetes.io/projected/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-kube-api-access-tmjz8\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.950784 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.950980 3552 topology_manager.go:215] "Topology Admit Handler" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" podNamespace="openstack" podName="nova-metadata-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.952517 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.955220 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.960773 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.987521 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.987960 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" podNamespace="openstack" podName="nova-scheduler-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.989031 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Mar 20 15:51:04 crc kubenswrapper[3552]: I0320 15:51:04.991881 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034294 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034380 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034457 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tmjz8\" (UniqueName: \"kubernetes.io/projected/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-kube-api-access-tmjz8\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034619 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kfgr7\" (UniqueName: \"kubernetes.io/projected/69a966a0-89a7-41a6-816e-83a53f7260b4-kube-api-access-kfgr7\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034680 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-logs\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034743 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-config-data\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.034920 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.037263 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-logs\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.049474 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.057595 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-config-data\") pod \"nova-api-0\" (UID: 
\"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.063031 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.077094 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmjz8\" (UniqueName: \"kubernetes.io/projected/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-kube-api-access-tmjz8\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.077605 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.087274 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfgr7\" (UniqueName: \"kubernetes.io/projected/69a966a0-89a7-41a6-816e-83a53f7260b4-kube-api-access-kfgr7\") pod \"nova-cell1-novncproxy-0\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.087921 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") " pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.100464 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-855c567469-gnq4t"] Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.100683 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" podNamespace="openstack" podName="dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.102662 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.103165 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.117898 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-855c567469-gnq4t"] Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.121760 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.151778 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-config-data\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.152168 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.152280 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdt77\" (UniqueName: \"kubernetes.io/projected/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-kube-api-access-zdt77\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.152468 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.152706 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-config-data\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.152828 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-logs\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.152918 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm8rc\" (UniqueName: \"kubernetes.io/projected/ad87847c-d6d9-42cd-a08a-a9dd620db3db-kube-api-access-mm8rc\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254665 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-svc\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254737 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 
15:51:05.254781 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-config-data\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254828 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-logs\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254856 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mm8rc\" (UniqueName: \"kubernetes.io/projected/ad87847c-d6d9-42cd-a08a-a9dd620db3db-kube-api-access-mm8rc\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254880 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254909 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254933 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.254959 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-config-data\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.255002 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd98v\" (UniqueName: \"kubernetes.io/projected/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-kube-api-access-dd98v\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.255026 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-config\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.255048 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.255071 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zdt77\" (UniqueName: \"kubernetes.io/projected/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-kube-api-access-zdt77\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.262629 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-config-data\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.263433 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-logs\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.274871 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.287508 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-config-data\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.287538 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.300037 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdt77\" (UniqueName: \"kubernetes.io/projected/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-kube-api-access-zdt77\") pod \"nova-metadata-0\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.313007 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm8rc\" (UniqueName: \"kubernetes.io/projected/ad87847c-d6d9-42cd-a08a-a9dd620db3db-kube-api-access-mm8rc\") pod \"nova-scheduler-0\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") " pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.370933 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc 
kubenswrapper[3552]: I0320 15:51:05.371010 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.371039 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.371129 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-dd98v\" (UniqueName: \"kubernetes.io/projected/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-kube-api-access-dd98v\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.371169 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-config\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.371260 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-svc\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.372340 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-svc\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.372745 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.373596 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-config\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.373692 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.374376 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.380114 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.396348 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd98v\" (UniqueName: \"kubernetes.io/projected/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-kube-api-access-dd98v\") pod \"dnsmasq-dns-855c567469-gnq4t\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") " pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.610984 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.625940 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.695049 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerStarted","Data":"e67bef394b7dc2e7f2142af889c1d09d2ba3ed0d732a854ae634162b42701ee0"} Mar 20 15:51:05 crc kubenswrapper[3552]: I0320 15:51:05.771198 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-w7mf5"] Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.004794 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:06 crc kubenswrapper[3552]: W0320 15:51:06.007137 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee8b3ca1_da04_4a30_8f8c_e9bda79881a4.slice/crio-ad46db02e9457cc7b9b64f741e6fb69f5427998eb752e0f9f2e700c1a6f46ba2 WatchSource:0}: Error finding container ad46db02e9457cc7b9b64f741e6fb69f5427998eb752e0f9f2e700c1a6f46ba2: Status 404 returned error can't find the container with id ad46db02e9457cc7b9b64f741e6fb69f5427998eb752e0f9f2e700c1a6f46ba2 Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.020012 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:06 crc kubenswrapper[3552]: W0320 15:51:06.218215 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29f72ada_8b4f_43d7_a7a5_d2d9090f8cc1.slice/crio-4c1651e43676cbf811fb37f24a9e23bbb4e62e2aa9d870d63adee0eab5af5073 WatchSource:0}: Error finding container 4c1651e43676cbf811fb37f24a9e23bbb4e62e2aa9d870d63adee0eab5af5073: Status 404 returned error can't find the container with id 4c1651e43676cbf811fb37f24a9e23bbb4e62e2aa9d870d63adee0eab5af5073 Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.231443 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.346475 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.360071 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-855c567469-gnq4t"] Mar 20 15:51:06 crc 
kubenswrapper[3552]: W0320 15:51:06.376236 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5ef9583_15b5_44f2_b2cc_8b41c8d40e1d.slice/crio-e31cb748a6126538b1a741e6cdf4a92636a99ac78fb2d7f446e0cd21b842f613 WatchSource:0}: Error finding container e31cb748a6126538b1a741e6cdf4a92636a99ac78fb2d7f446e0cd21b842f613: Status 404 returned error can't find the container with id e31cb748a6126538b1a741e6cdf4a92636a99ac78fb2d7f446e0cd21b842f613 Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.509428 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-kk9lv"] Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.509620 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e8cee03b-a095-46b4-aa33-ebb7498fdc43" podNamespace="openstack" podName="nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.510692 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.515225 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.515464 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.529578 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-kk9lv"] Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.613386 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-config-data\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.615499 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpbd7\" (UniqueName: \"kubernetes.io/projected/e8cee03b-a095-46b4-aa33-ebb7498fdc43-kube-api-access-jpbd7\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.615715 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-scripts\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.615904 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.709107 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"ad87847c-d6d9-42cd-a08a-a9dd620db3db","Type":"ContainerStarted","Data":"8064e62f3b79cf7582abfd09e60d60b780e22ebd53954e646c745b45f8994ede"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.712123 3552 generic.go:334] "Generic (PLEG): container finished" podID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerID="6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2" exitCode=0 Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.712215 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerDied","Data":"6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.713889 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-855c567469-gnq4t" event={"ID":"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d","Type":"ContainerStarted","Data":"e31cb748a6126538b1a741e6cdf4a92636a99ac78fb2d7f446e0cd21b842f613"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.716348 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-w7mf5" event={"ID":"773e318d-c001-447d-99e0-c351b3782d0d","Type":"ContainerStarted","Data":"d5ac1d7793cb31283017aa5481d427a38fb2bfb427499424d5226c984cd5cec7"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.716380 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-w7mf5" event={"ID":"773e318d-c001-447d-99e0-c351b3782d0d","Type":"ContainerStarted","Data":"fe112b6a0b9504807efb01b17ac3c3b310e95175a5bd8a9c0c18f4e5b1d63fcf"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.717283 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-scripts\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.717352 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.717384 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-config-data\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.717482 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-jpbd7\" (UniqueName: \"kubernetes.io/projected/e8cee03b-a095-46b4-aa33-ebb7498fdc43-kube-api-access-jpbd7\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.717642 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4","Type":"ContainerStarted","Data":"ad46db02e9457cc7b9b64f741e6fb69f5427998eb752e0f9f2e700c1a6f46ba2"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.722326 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-config-data\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.722538 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"69a966a0-89a7-41a6-816e-83a53f7260b4","Type":"ContainerStarted","Data":"c6438d027d0450b226851fee45135348e8ebd472d3edce0f9a28d3d4cc77b8e3"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.723689 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-scripts\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.729514 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1","Type":"ContainerStarted","Data":"4c1651e43676cbf811fb37f24a9e23bbb4e62e2aa9d870d63adee0eab5af5073"} Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.730073 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.739970 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpbd7\" (UniqueName: \"kubernetes.io/projected/e8cee03b-a095-46b4-aa33-ebb7498fdc43-kube-api-access-jpbd7\") pod \"nova-cell1-conductor-db-sync-kk9lv\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.765154 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-w7mf5" podStartSLOduration=2.765111142 podStartE2EDuration="2.765111142s" podCreationTimestamp="2026-03-20 15:51:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:06.761005977 +0000 UTC m=+1566.454702837" watchObservedRunningTime="2026-03-20 15:51:06.765111142 +0000 UTC m=+1566.458807972" Mar 20 15:51:06 crc kubenswrapper[3552]: I0320 15:51:06.831715 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:07 crc kubenswrapper[3552]: I0320 15:51:07.331226 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-kk9lv"] Mar 20 15:51:07 crc kubenswrapper[3552]: I0320 15:51:07.740112 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" event={"ID":"e8cee03b-a095-46b4-aa33-ebb7498fdc43","Type":"ContainerStarted","Data":"280d30840e9bfc901e92ad5e9bee8a7641a79fdfc238b672e1b3f2796fe104bc"} Mar 20 15:51:07 crc kubenswrapper[3552]: I0320 15:51:07.742207 3552 generic.go:334] "Generic (PLEG): container finished" podID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerID="63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d" exitCode=0 Mar 20 15:51:07 crc kubenswrapper[3552]: I0320 15:51:07.743958 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-855c567469-gnq4t" event={"ID":"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d","Type":"ContainerDied","Data":"63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d"} Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.191017 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.209432 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.757152 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerStarted","Data":"40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a"} Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.762791 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" event={"ID":"e8cee03b-a095-46b4-aa33-ebb7498fdc43","Type":"ContainerStarted","Data":"7bbc5d3815c901d18f171790d0ec33f438443169ed6818e6c189c2dd6da77fe2"} Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.765905 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-855c567469-gnq4t" event={"ID":"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d","Type":"ContainerStarted","Data":"2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9"} Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.802884 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" podStartSLOduration=2.8028378 podStartE2EDuration="2.8028378s" podCreationTimestamp="2026-03-20 15:51:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:08.802498871 +0000 UTC m=+1568.496195701" watchObservedRunningTime="2026-03-20 15:51:08.8028378 +0000 UTC m=+1568.496534630" Mar 20 15:51:08 crc kubenswrapper[3552]: I0320 15:51:08.832702 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-855c567469-gnq4t" podStartSLOduration=3.832645188 podStartE2EDuration="3.832645188s" podCreationTimestamp="2026-03-20 15:51:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:08.822739496 +0000 UTC m=+1568.516436346" watchObservedRunningTime="2026-03-20 15:51:08.832645188 +0000 UTC m=+1568.526342018" Mar 20 
15:51:09 crc kubenswrapper[3552]: I0320 15:51:09.772869 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.812084 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4","Type":"ContainerStarted","Data":"781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2"} Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.812668 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4","Type":"ContainerStarted","Data":"ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e"} Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.815631 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"69a966a0-89a7-41a6-816e-83a53f7260b4","Type":"ContainerStarted","Data":"47a793258dd9e259b1cff53ba218c124c7f4374889e24e965e52a43b02010a47"} Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.815730 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="69a966a0-89a7-41a6-816e-83a53f7260b4" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://47a793258dd9e259b1cff53ba218c124c7f4374889e24e965e52a43b02010a47" gracePeriod=30 Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.820590 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-log" containerID="cri-o://3e5bfd0c995152b0417ef5b67f5f25f5179ac2e8745dead2622cdeef39659234" gracePeriod=30 Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.820612 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-metadata" containerID="cri-o://d9074cd70522c85ba4774595bb9c2ec7a3dfc17555f921873ee0b3a3766e590b" gracePeriod=30 Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.820608 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1","Type":"ContainerStarted","Data":"d9074cd70522c85ba4774595bb9c2ec7a3dfc17555f921873ee0b3a3766e590b"} Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.820658 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1","Type":"ContainerStarted","Data":"3e5bfd0c995152b0417ef5b67f5f25f5179ac2e8745dead2622cdeef39659234"} Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.827012 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad87847c-d6d9-42cd-a08a-a9dd620db3db","Type":"ContainerStarted","Data":"9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d"} Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.840257 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.534819047 podStartE2EDuration="7.840213448s" podCreationTimestamp="2026-03-20 15:51:04 +0000 UTC" firstStartedPulling="2026-03-20 15:51:06.021026787 +0000 UTC m=+1565.714723617" lastFinishedPulling="2026-03-20 15:51:10.326421178 +0000 UTC m=+1570.020118018" observedRunningTime="2026-03-20 
15:51:11.832528693 +0000 UTC m=+1571.526225543" watchObservedRunningTime="2026-03-20 15:51:11.840213448 +0000 UTC m=+1571.533910278" Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.851995 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.522030324 podStartE2EDuration="7.851931586s" podCreationTimestamp="2026-03-20 15:51:04 +0000 UTC" firstStartedPulling="2026-03-20 15:51:05.98920797 +0000 UTC m=+1565.682904800" lastFinishedPulling="2026-03-20 15:51:10.319109232 +0000 UTC m=+1570.012806062" observedRunningTime="2026-03-20 15:51:11.849202347 +0000 UTC m=+1571.542899167" watchObservedRunningTime="2026-03-20 15:51:11.851931586 +0000 UTC m=+1571.545628416" Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.878195 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.98925447 podStartE2EDuration="7.878143172s" podCreationTimestamp="2026-03-20 15:51:04 +0000 UTC" firstStartedPulling="2026-03-20 15:51:06.223281619 +0000 UTC m=+1565.916978449" lastFinishedPulling="2026-03-20 15:51:11.112170321 +0000 UTC m=+1570.805867151" observedRunningTime="2026-03-20 15:51:11.870509308 +0000 UTC m=+1571.564206138" watchObservedRunningTime="2026-03-20 15:51:11.878143172 +0000 UTC m=+1571.571840002" Mar 20 15:51:11 crc kubenswrapper[3552]: I0320 15:51:11.899070 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.925314124 podStartE2EDuration="7.899025263s" podCreationTimestamp="2026-03-20 15:51:04 +0000 UTC" firstStartedPulling="2026-03-20 15:51:06.344611323 +0000 UTC m=+1566.038308153" lastFinishedPulling="2026-03-20 15:51:10.318322462 +0000 UTC m=+1570.012019292" observedRunningTime="2026-03-20 15:51:11.883600201 +0000 UTC m=+1571.577297031" watchObservedRunningTime="2026-03-20 15:51:11.899025263 +0000 UTC m=+1571.592722093" Mar 20 15:51:12 crc kubenswrapper[3552]: I0320 15:51:12.837552 3552 generic.go:334] "Generic (PLEG): container finished" podID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerID="3e5bfd0c995152b0417ef5b67f5f25f5179ac2e8745dead2622cdeef39659234" exitCode=143 Mar 20 15:51:12 crc kubenswrapper[3552]: I0320 15:51:12.837638 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1","Type":"ContainerDied","Data":"3e5bfd0c995152b0417ef5b67f5f25f5179ac2e8745dead2622cdeef39659234"} Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.103907 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.104297 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.122119 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.612598 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.612658 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.627668 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-855c567469-gnq4t" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.685285 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c899bf5cf-7lhp5"] Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.685572 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerName="dnsmasq-dns" containerID="cri-o://723b0950289893a7d1926deee7c09bc806fb812a7c061e0b51e3f7aa3bb0aa36" gracePeriod=10 Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.691004 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.868564 3552 generic.go:334] "Generic (PLEG): container finished" podID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerID="723b0950289893a7d1926deee7c09bc806fb812a7c061e0b51e3f7aa3bb0aa36" exitCode=0 Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.868672 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" event={"ID":"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d","Type":"ContainerDied","Data":"723b0950289893a7d1926deee7c09bc806fb812a7c061e0b51e3f7aa3bb0aa36"} Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.870041 3552 generic.go:334] "Generic (PLEG): container finished" podID="773e318d-c001-447d-99e0-c351b3782d0d" containerID="d5ac1d7793cb31283017aa5481d427a38fb2bfb427499424d5226c984cd5cec7" exitCode=0 Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.870203 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-w7mf5" event={"ID":"773e318d-c001-447d-99e0-c351b3782d0d","Type":"ContainerDied","Data":"d5ac1d7793cb31283017aa5481d427a38fb2bfb427499424d5226c984cd5cec7"} Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.944943 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Mar 20 15:51:15 crc kubenswrapper[3552]: I0320 15:51:15.946461 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.183553 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.190585 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.212:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.190603 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.212:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.365133 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-nb\") pod \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.365222 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-svc\") pod \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.365304 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-config\") pod \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.365365 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zxnd\" (UniqueName: \"kubernetes.io/projected/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-kube-api-access-6zxnd\") pod \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.365438 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-swift-storage-0\") pod \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.365600 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-sb\") pod \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\" (UID: \"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d\") " Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.373584 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-kube-api-access-6zxnd" (OuterVolumeSpecName: "kube-api-access-6zxnd") pod "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" (UID: "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d"). InnerVolumeSpecName "kube-api-access-6zxnd". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.427629 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" (UID: "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.427937 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" (UID: "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.429231 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" (UID: "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.444361 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-config" (OuterVolumeSpecName: "config") pod "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" (UID: "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.448786 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" (UID: "e02ab3fd-5d7b-4360-a37a-3b4c74cf831d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.469478 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-6zxnd\" (UniqueName: \"kubernetes.io/projected/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-kube-api-access-6zxnd\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.470031 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.470286 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.470311 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.470322 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-dns-svc\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.470332 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d-config\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.879948 3552 generic.go:334] "Generic (PLEG): container finished" podID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerID="40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a" exitCode=0
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.880013 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerDied","Data":"40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a"}
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.884579 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5"
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.885140 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c899bf5cf-7lhp5" event={"ID":"e02ab3fd-5d7b-4360-a37a-3b4c74cf831d","Type":"ContainerDied","Data":"9ed0651aa5ec84a51c8358987b69ff022603d11ee13c5ac268b658ff4ebfe7d7"}
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.885249 3552 scope.go:117] "RemoveContainer" containerID="723b0950289893a7d1926deee7c09bc806fb812a7c061e0b51e3f7aa3bb0aa36"
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.950163 3552 scope.go:117] "RemoveContainer" containerID="d446b4066c75e04d83452bc4f98c7effa25d72eaa75e07e919bec5d8c6ee7c94"
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.966488 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c899bf5cf-7lhp5"]
Mar 20 15:51:16 crc kubenswrapper[3552]: I0320 15:51:16.978644 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c899bf5cf-7lhp5"]
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.190485 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-w7mf5"
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.285136 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-combined-ca-bundle\") pod \"773e318d-c001-447d-99e0-c351b3782d0d\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") "
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.285205 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cwfl\" (UniqueName: \"kubernetes.io/projected/773e318d-c001-447d-99e0-c351b3782d0d-kube-api-access-8cwfl\") pod \"773e318d-c001-447d-99e0-c351b3782d0d\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") "
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.285245 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-config-data\") pod \"773e318d-c001-447d-99e0-c351b3782d0d\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") "
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.285375 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-scripts\") pod \"773e318d-c001-447d-99e0-c351b3782d0d\" (UID: \"773e318d-c001-447d-99e0-c351b3782d0d\") "
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.293864 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-scripts" (OuterVolumeSpecName: "scripts") pod "773e318d-c001-447d-99e0-c351b3782d0d" (UID: "773e318d-c001-447d-99e0-c351b3782d0d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.293958 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/773e318d-c001-447d-99e0-c351b3782d0d-kube-api-access-8cwfl" (OuterVolumeSpecName: "kube-api-access-8cwfl") pod "773e318d-c001-447d-99e0-c351b3782d0d" (UID: "773e318d-c001-447d-99e0-c351b3782d0d"). InnerVolumeSpecName "kube-api-access-8cwfl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.312602 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-config-data" (OuterVolumeSpecName: "config-data") pod "773e318d-c001-447d-99e0-c351b3782d0d" (UID: "773e318d-c001-447d-99e0-c351b3782d0d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.329513 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "773e318d-c001-447d-99e0-c351b3782d0d" (UID: "773e318d-c001-447d-99e0-c351b3782d0d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.388254 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.388306 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-8cwfl\" (UniqueName: \"kubernetes.io/projected/773e318d-c001-447d-99e0-c351b3782d0d-kube-api-access-8cwfl\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.388325 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.388339 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/773e318d-c001-447d-99e0-c351b3782d0d-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.445015 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" path="/var/lib/kubelet/pods/e02ab3fd-5d7b-4360-a37a-3b4c74cf831d/volumes"
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.906390 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-w7mf5" event={"ID":"773e318d-c001-447d-99e0-c351b3782d0d","Type":"ContainerDied","Data":"fe112b6a0b9504807efb01b17ac3c3b310e95175a5bd8a9c0c18f4e5b1d63fcf"}
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.906724 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-w7mf5"
Mar 20 15:51:17 crc kubenswrapper[3552]: I0320 15:51:17.906750 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe112b6a0b9504807efb01b17ac3c3b310e95175a5bd8a9c0c18f4e5b1d63fcf"
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.013937 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.014188 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-log" containerID="cri-o://ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e" gracePeriod=30
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.014665 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-api" containerID="cri-o://781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2" gracePeriod=30
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.043319 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.043576 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" containerName="nova-scheduler-scheduler" containerID="cri-o://9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d" gracePeriod=30
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.920961 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerStarted","Data":"639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623"}
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.923623 3552 generic.go:334] "Generic (PLEG): container finished" podID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerID="ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e" exitCode=143
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.923660 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4","Type":"ContainerDied","Data":"ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e"}
Mar 20 15:51:18 crc kubenswrapper[3552]: I0320 15:51:18.938476 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gl4mx" podStartSLOduration=5.371069364 podStartE2EDuration="15.93842756s" podCreationTimestamp="2026-03-20 15:51:03 +0000 UTC" firstStartedPulling="2026-03-20 15:51:06.713875319 +0000 UTC m=+1566.407572149" lastFinishedPulling="2026-03-20 15:51:17.281233515 +0000 UTC m=+1576.974930345" observedRunningTime="2026-03-20 15:51:18.934854649 +0000 UTC m=+1578.628551489" watchObservedRunningTime="2026-03-20 15:51:18.93842756 +0000 UTC m=+1578.632124410"
Mar 20 15:51:20 crc kubenswrapper[3552]: E0320 15:51:20.614646 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d is running failed: container process not found" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Mar 20 15:51:20 crc kubenswrapper[3552]: E0320 15:51:20.615805 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d is running failed: container process not found" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Mar 20 15:51:20 crc kubenswrapper[3552]: E0320 15:51:20.616216 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d is running failed: container process not found" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Mar 20 15:51:20 crc kubenswrapper[3552]: E0320 15:51:20.616246 3552 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" containerName="nova-scheduler-scheduler"
Mar 20 15:51:20 crc kubenswrapper[3552]: I0320 15:51:20.932135 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:51:20 crc kubenswrapper[3552]: I0320 15:51:20.939574 3552 generic.go:334] "Generic (PLEG): container finished" podID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d" exitCode=0
Mar 20 15:51:20 crc kubenswrapper[3552]: I0320 15:51:20.939596 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:51:20 crc kubenswrapper[3552]: I0320 15:51:20.939612 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad87847c-d6d9-42cd-a08a-a9dd620db3db","Type":"ContainerDied","Data":"9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d"}
Mar 20 15:51:20 crc kubenswrapper[3552]: I0320 15:51:20.939632 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad87847c-d6d9-42cd-a08a-a9dd620db3db","Type":"ContainerDied","Data":"8064e62f3b79cf7582abfd09e60d60b780e22ebd53954e646c745b45f8994ede"}
Mar 20 15:51:20 crc kubenswrapper[3552]: I0320 15:51:20.939652 3552 scope.go:117] "RemoveContainer" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.014124 3552 scope.go:117] "RemoveContainer" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d"
Mar 20 15:51:21 crc kubenswrapper[3552]: E0320 15:51:21.017625 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d\": container with ID starting with 9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d not found: ID does not exist" containerID="9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.017677 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d"} err="failed to get container status \"9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d\": rpc error: code = NotFound desc = could not find container \"9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d\": container with ID starting with 9091dd22f275598b455ae7326222c3d043c3d1be44077c29bcbff5e4a81f945d not found: ID does not exist"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.051341 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.051542 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="788065ca-23a0-4317-bff9-f6503f659aee" containerName="kube-state-metrics" containerID="cri-o://dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83" gracePeriod=30
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.066073 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mm8rc\" (UniqueName: \"kubernetes.io/projected/ad87847c-d6d9-42cd-a08a-a9dd620db3db-kube-api-access-mm8rc\") pod \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.066156 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-combined-ca-bundle\") pod \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.066213 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-config-data\") pod \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\" (UID: \"ad87847c-d6d9-42cd-a08a-a9dd620db3db\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.075544 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad87847c-d6d9-42cd-a08a-a9dd620db3db-kube-api-access-mm8rc" (OuterVolumeSpecName: "kube-api-access-mm8rc") pod "ad87847c-d6d9-42cd-a08a-a9dd620db3db" (UID: "ad87847c-d6d9-42cd-a08a-a9dd620db3db"). InnerVolumeSpecName "kube-api-access-mm8rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.104396 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-config-data" (OuterVolumeSpecName: "config-data") pod "ad87847c-d6d9-42cd-a08a-a9dd620db3db" (UID: "ad87847c-d6d9-42cd-a08a-a9dd620db3db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.104651 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad87847c-d6d9-42cd-a08a-a9dd620db3db" (UID: "ad87847c-d6d9-42cd-a08a-a9dd620db3db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.169083 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mm8rc\" (UniqueName: \"kubernetes.io/projected/ad87847c-d6d9-42cd-a08a-a9dd620db3db-kube-api-access-mm8rc\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.169128 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.169143 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad87847c-d6d9-42cd-a08a-a9dd620db3db-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.304936 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.326266 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.338758 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.338992 3552 topology_manager.go:215] "Topology Admit Handler" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" podNamespace="openstack" podName="nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: E0320 15:51:21.339333 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" containerName="nova-scheduler-scheduler"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339351 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" containerName="nova-scheduler-scheduler"
Mar 20 15:51:21 crc kubenswrapper[3552]: E0320 15:51:21.339364 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="773e318d-c001-447d-99e0-c351b3782d0d" containerName="nova-manage"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339373 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="773e318d-c001-447d-99e0-c351b3782d0d" containerName="nova-manage"
Mar 20 15:51:21 crc kubenswrapper[3552]: E0320 15:51:21.339396 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerName="dnsmasq-dns"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339418 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerName="dnsmasq-dns"
Mar 20 15:51:21 crc kubenswrapper[3552]: E0320 15:51:21.339444 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerName="init"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339453 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerName="init"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339679 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="773e318d-c001-447d-99e0-c351b3782d0d" containerName="nova-manage"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339701 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e02ab3fd-5d7b-4360-a37a-3b4c74cf831d" containerName="dnsmasq-dns"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.339720 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" containerName="nova-scheduler-scheduler"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.340705 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.343056 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.350787 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.417979 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.469102 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad87847c-d6d9-42cd-a08a-a9dd620db3db" path="/var/lib/kubelet/pods/ad87847c-d6d9-42cd-a08a-a9dd620db3db/volumes"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.478664 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz4pk\" (UniqueName: \"kubernetes.io/projected/788065ca-23a0-4317-bff9-f6503f659aee-kube-api-access-mz4pk\") pod \"788065ca-23a0-4317-bff9-f6503f659aee\" (UID: \"788065ca-23a0-4317-bff9-f6503f659aee\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.479609 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c869b\" (UniqueName: \"kubernetes.io/projected/305d791f-0105-4106-913f-d1d945d0b1f9-kube-api-access-c869b\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.479652 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-config-data\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.479751 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.502546 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/788065ca-23a0-4317-bff9-f6503f659aee-kube-api-access-mz4pk" (OuterVolumeSpecName: "kube-api-access-mz4pk") pod "788065ca-23a0-4317-bff9-f6503f659aee" (UID: "788065ca-23a0-4317-bff9-f6503f659aee"). InnerVolumeSpecName "kube-api-access-mz4pk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.581685 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.581791 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-c869b\" (UniqueName: \"kubernetes.io/projected/305d791f-0105-4106-913f-d1d945d0b1f9-kube-api-access-c869b\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.581816 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-config-data\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.581880 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mz4pk\" (UniqueName: \"kubernetes.io/projected/788065ca-23a0-4317-bff9-f6503f659aee-kube-api-access-mz4pk\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.585590 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-config-data\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.591043 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.605169 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-c869b\" (UniqueName: \"kubernetes.io/projected/305d791f-0105-4106-913f-d1d945d0b1f9-kube-api-access-c869b\") pod \"nova-scheduler-0\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") " pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.670143 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.720366 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.784268 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-config-data\") pod \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.784350 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-logs\") pod \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.784524 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmjz8\" (UniqueName: \"kubernetes.io/projected/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-kube-api-access-tmjz8\") pod \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.784604 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-combined-ca-bundle\") pod \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\" (UID: \"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4\") "
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.784870 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-logs" (OuterVolumeSpecName: "logs") pod "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" (UID: "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.785194 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-logs\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.791660 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-kube-api-access-tmjz8" (OuterVolumeSpecName: "kube-api-access-tmjz8") pod "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" (UID: "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4"). InnerVolumeSpecName "kube-api-access-tmjz8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.820809 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" (UID: "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.836790 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-config-data" (OuterVolumeSpecName: "config-data") pod "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" (UID: "ee8b3ca1-da04-4a30-8f8c-e9bda79881a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.886517 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-tmjz8\" (UniqueName: \"kubernetes.io/projected/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-kube-api-access-tmjz8\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.886552 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.886564 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.951188 3552 generic.go:334] "Generic (PLEG): container finished" podID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerID="781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2" exitCode=0
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.951249 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.951253 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4","Type":"ContainerDied","Data":"781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2"}
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.951391 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee8b3ca1-da04-4a30-8f8c-e9bda79881a4","Type":"ContainerDied","Data":"ad46db02e9457cc7b9b64f741e6fb69f5427998eb752e0f9f2e700c1a6f46ba2"}
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.951450 3552 scope.go:117] "RemoveContainer" containerID="781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.953596 3552 generic.go:334] "Generic (PLEG): container finished" podID="788065ca-23a0-4317-bff9-f6503f659aee" containerID="dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83" exitCode=2
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.953671 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.953746 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"788065ca-23a0-4317-bff9-f6503f659aee","Type":"ContainerDied","Data":"dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83"}
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.953792 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"788065ca-23a0-4317-bff9-f6503f659aee","Type":"ContainerDied","Data":"4fe62e7b0308a0002367b20fdb23ed98f3e5963ea4862d9a800f378effdd1efe"}
Mar 20 15:51:21 crc kubenswrapper[3552]: I0320 15:51:21.994712 3552 scope.go:117] "RemoveContainer" containerID="ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.056184 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.071961 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.080372 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.088238 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.106985 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.107353 3552 topology_manager.go:215] "Topology Admit Handler" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" podNamespace="openstack" podName="nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.107766 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-log"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.107909 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-log"
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.108045 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-api"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.108132 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-api"
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.108240 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="788065ca-23a0-4317-bff9-f6503f659aee" containerName="kube-state-metrics"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.108328 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="788065ca-23a0-4317-bff9-f6503f659aee" containerName="kube-state-metrics"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.108605 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="788065ca-23a0-4317-bff9-f6503f659aee" containerName="kube-state-metrics"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.108694 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-log"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.108762 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" containerName="nova-api-api"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.110782 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.117319 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.117572 3552 topology_manager.go:215] "Topology Admit Handler" podUID="596194fb-82df-4137-b62a-0f29c83d4978" podNamespace="openstack" podName="kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.118753 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.120563 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.125829 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.129436 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.129831 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.149580 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.158206 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.159834 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod788065ca_23a0_4317_bff9_f6503f659aee.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod788065ca_23a0_4317_bff9_f6503f659aee.slice/crio-4fe62e7b0308a0002367b20fdb23ed98f3e5963ea4862d9a800f378effdd1efe\": RecentStats: unable to find data in memory cache]"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.160562 3552 scope.go:117] "RemoveContainer" containerID="781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2"
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.161369 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2\": container with ID starting with 781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2 not found: ID does not exist" containerID="781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.161457 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2"} err="failed to get container status \"781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2\": rpc error: code = NotFound desc = could not find container \"781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2\": container with ID starting with 781935fafd2d789bf4def233fb34e7a4fa9d556897d039801d8098cf3c0130b2 not found: ID does not exist"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.161478 3552 scope.go:117] "RemoveContainer" containerID="ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e"
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.162263 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e\": container with ID starting with ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e not found: ID does not exist" containerID="ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.162311 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e"} err="failed to get container status \"ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e\": rpc error: code = NotFound desc = could not find container \"ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e\": container with ID starting with ca40b8dc686f78e4364abb9830c69a032a0cc20422d6cfa45bc77bb8899b1b4e not found: ID does not exist"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.162327 3552 scope.go:117] "RemoveContainer" containerID="dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.192263 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-config-data\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.192845 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.193069 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmcv5\" (UniqueName: \"kubernetes.io/projected/596194fb-82df-4137-b62a-0f29c83d4978-kube-api-access-lmcv5\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.193176 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.193324 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.193375 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.193486 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9tmm\" (UniqueName: \"kubernetes.io/projected/363614e4-b836-4c3f-b5ff-d27f9970b8b3-kube-api-access-s9tmm\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.193553 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/363614e4-b836-4c3f-b5ff-d27f9970b8b3-logs\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295555 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295647 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295692 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295712 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-s9tmm\" (UniqueName: \"kubernetes.io/projected/363614e4-b836-4c3f-b5ff-d27f9970b8b3-kube-api-access-s9tmm\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295760 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/363614e4-b836-4c3f-b5ff-d27f9970b8b3-logs\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295833 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-config-data\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295855 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.295955 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-lmcv5\" (UniqueName: \"kubernetes.io/projected/596194fb-82df-4137-b62a-0f29c83d4978-kube-api-access-lmcv5\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.296683 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/363614e4-b836-4c3f-b5ff-d27f9970b8b3-logs\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.299485 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.300147 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.301295 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/596194fb-82df-4137-b62a-0f29c83d4978-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.303909 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.303975 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-config-data\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.311485 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9tmm\" (UniqueName: \"kubernetes.io/projected/363614e4-b836-4c3f-b5ff-d27f9970b8b3-kube-api-access-s9tmm\") pod \"nova-api-0\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.316145 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmcv5\" (UniqueName: \"kubernetes.io/projected/596194fb-82df-4137-b62a-0f29c83d4978-kube-api-access-lmcv5\") pod \"kube-state-metrics-0\" (UID: \"596194fb-82df-4137-b62a-0f29c83d4978\") " pod="openstack/kube-state-metrics-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.316934 3552 scope.go:117] "RemoveContainer" containerID="dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83"
Mar 20 15:51:22 crc kubenswrapper[3552]: E0320 15:51:22.317460 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83\": container with ID starting with dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83 not found: ID does not exist" containerID="dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.317510 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83"} err="failed to get container status \"dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83\": rpc error: code = NotFound desc = could not find container \"dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83\": container with ID starting with dfb6939da695075bb1db7cb8e24844f052885e2966caf6a41e73e12aa8403f83 not found: ID does not exist"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.462339 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Mar 20 15:51:22 crc kubenswrapper[3552]: I0320 15:51:22.476831 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:22.967245 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:22.974265 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"305d791f-0105-4106-913f-d1d945d0b1f9","Type":"ContainerStarted","Data":"ea55787b4075184f8b451cb3d8f81aaf35da76dfb3ed1bf42e30e3e6a8fe55a8"}
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.058739 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.188606 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.188827 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-central-agent" containerID="cri-o://d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3" gracePeriod=30
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.188959 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="proxy-httpd" containerID="cri-o://b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b" gracePeriod=30
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.188996 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="sg-core" containerID="cri-o://272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4" gracePeriod=30
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.189028 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-notification-agent" containerID="cri-o://f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f" gracePeriod=30
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.375593 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.375645 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.447941 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="788065ca-23a0-4317-bff9-f6503f659aee" path="/var/lib/kubelet/pods/788065ca-23a0-4317-bff9-f6503f659aee/volumes"
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.453175 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee8b3ca1-da04-4a30-8f8c-e9bda79881a4" path="/var/lib/kubelet/pods/ee8b3ca1-da04-4a30-8f8c-e9bda79881a4/volumes"
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.996769 3552 generic.go:334] "Generic (PLEG): container finished" podID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerID="b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b" exitCode=0
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.996807 3552 generic.go:334] "Generic (PLEG): container finished" podID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerID="272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4" exitCode=2
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.996821 3552 generic.go:334] "Generic (PLEG): container finished" podID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerID="d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3" exitCode=0
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.996871 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerDied","Data":"b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b"}
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.996895 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerDied","Data":"272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4"}
Mar 20 15:51:23 crc kubenswrapper[3552]: I0320 15:51:23.996910 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerDied","Data":"d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.004865 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"305d791f-0105-4106-913f-d1d945d0b1f9","Type":"ContainerStarted","Data":"fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.012055 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"596194fb-82df-4137-b62a-0f29c83d4978","Type":"ContainerStarted","Data":"a8d05340f934284092849cf332e87580a921ee2781dd4bb7a6fdee02d46751c4"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.012091 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"596194fb-82df-4137-b62a-0f29c83d4978","Type":"ContainerStarted","Data":"d051e681f21ba32ff67982d3dc3c8a8864de878bdc7f722bf5771817374023ca"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.013236 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.027902 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"363614e4-b836-4c3f-b5ff-d27f9970b8b3","Type":"ContainerStarted","Data":"6d3bafba21b26aa9210a016fa3c8c5584592257bbe9371f6708f2421ff549429"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.027951 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"363614e4-b836-4c3f-b5ff-d27f9970b8b3","Type":"ContainerStarted","Data":"265d20eac124d681339843cdb9a3d8ef18933c1d5853cebfa9a467660b4ae62e"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.027969 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"363614e4-b836-4c3f-b5ff-d27f9970b8b3","Type":"ContainerStarted","Data":"dfd3d5901717c15ca2eff22d01d7c1acec62ec63b6d531fd76cc5676b15f3cb1"}
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.037619 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.037571197 podStartE2EDuration="3.037571197s" podCreationTimestamp="2026-03-20 15:51:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:24.025829658 +0000 UTC m=+1583.719526488" watchObservedRunningTime="2026-03-20 15:51:24.037571197 +0000 UTC m=+1583.731268027"
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.074222 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.549744997 podStartE2EDuration="2.074169217s" podCreationTimestamp="2026-03-20 15:51:22 +0000 UTC" firstStartedPulling="2026-03-20 15:51:23.062913963 +0000 UTC m=+1582.756610793" lastFinishedPulling="2026-03-20 15:51:23.587338183 +0000 UTC m=+1583.281035013" observedRunningTime="2026-03-20 15:51:24.070912164 +0000 UTC m=+1583.764609004" watchObservedRunningTime="2026-03-20 15:51:24.074169217 +0000 UTC m=+1583.767866047"
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.087590 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.087544337 podStartE2EDuration="2.087544337s" podCreationTimestamp="2026-03-20 15:51:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:24.084428278 +0000 UTC m=+1583.778125128" watchObservedRunningTime="2026-03-20 15:51:24.087544337 +0000 UTC m=+1583.781241167"
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.228659 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gl4mx"
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.230040 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gl4mx"
Mar 20 15:51:24 crc kubenswrapper[3552]: I0320 15:51:24.329467 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gl4mx"
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.041082 3552 generic.go:334] "Generic (PLEG): container finished" podID="e8cee03b-a095-46b4-aa33-ebb7498fdc43" containerID="7bbc5d3815c901d18f171790d0ec33f438443169ed6818e6c189c2dd6da77fe2" exitCode=0
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.041250 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" event={"ID":"e8cee03b-a095-46b4-aa33-ebb7498fdc43","Type":"ContainerDied","Data":"7bbc5d3815c901d18f171790d0ec33f438443169ed6818e6c189c2dd6da77fe2"}
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.132110 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gl4mx"
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.182936 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl4mx"]
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.401965 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496140 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm5xg\" (UniqueName: \"kubernetes.io/projected/02ab53ee-11af-4c70-8156-a7a5e378cd40-kube-api-access-hm5xg\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496529 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-config-data\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496564 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-combined-ca-bundle\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496663 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-log-httpd\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496708 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-run-httpd\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496785 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-scripts\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.496897 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-sg-core-conf-yaml\") pod \"02ab53ee-11af-4c70-8156-a7a5e378cd40\" (UID: \"02ab53ee-11af-4c70-8156-a7a5e378cd40\") "
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.498022 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.498245 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.515429 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02ab53ee-11af-4c70-8156-a7a5e378cd40-kube-api-access-hm5xg" (OuterVolumeSpecName: "kube-api-access-hm5xg") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40"). InnerVolumeSpecName "kube-api-access-hm5xg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.520645 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-scripts" (OuterVolumeSpecName: "scripts") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.541120 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.598800 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hm5xg\" (UniqueName: \"kubernetes.io/projected/02ab53ee-11af-4c70-8156-a7a5e378cd40-kube-api-access-hm5xg\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.598832 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-log-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.598843 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02ab53ee-11af-4c70-8156-a7a5e378cd40-run-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.598854 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.598866 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.600128 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40").
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.639360 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-config-data" (OuterVolumeSpecName: "config-data") pod "02ab53ee-11af-4c70-8156-a7a5e378cd40" (UID: "02ab53ee-11af-4c70-8156-a7a5e378cd40"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.701177 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:25 crc kubenswrapper[3552]: I0320 15:51:25.701227 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ab53ee-11af-4c70-8156-a7a5e378cd40-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.050337 3552 generic.go:334] "Generic (PLEG): container finished" podID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerID="f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f" exitCode=0 Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.050386 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.050450 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerDied","Data":"f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f"} Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.050477 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02ab53ee-11af-4c70-8156-a7a5e378cd40","Type":"ContainerDied","Data":"a4c099e7bcf3fa643b4ebfe9c66d05d3084406816a01b9ad83d4955c672eba8d"} Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.050502 3552 scope.go:117] "RemoveContainer" containerID="b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.111396 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.116872 3552 scope.go:117] "RemoveContainer" containerID="272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.129243 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.141459 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.141681 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.141997 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-central-agent" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142010 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-central-agent" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 
15:51:26.142025 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="proxy-httpd" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142032 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="proxy-httpd" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.142062 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-notification-agent" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142070 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-notification-agent" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.142091 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="sg-core" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142125 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="sg-core" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142348 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-central-agent" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142371 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="sg-core" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142394 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="ceilometer-notification-agent" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.142425 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" containerName="proxy-httpd" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.144629 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.146632 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.147379 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.147720 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.149031 3552 scope.go:117] "RemoveContainer" containerID="f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.170603 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.185085 3552 scope.go:117] "RemoveContainer" containerID="d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.231889 3552 scope.go:117] "RemoveContainer" containerID="b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.232341 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b\": container with ID starting with b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b not found: ID does not exist" containerID="b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.232392 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b"} err="failed to get container status \"b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b\": rpc error: code = NotFound desc = could not find container \"b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b\": container with ID starting with b11e6c448b35f0a91e4b3227e2511e6776ba6fc0f0616e602aa5ab572606a75b not found: ID does not exist" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.232434 3552 scope.go:117] "RemoveContainer" containerID="272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.240662 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4\": container with ID starting with 272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4 not found: ID does not exist" containerID="272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.240729 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4"} err="failed to get container status \"272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4\": rpc error: code = NotFound desc = could not find container \"272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4\": container with ID starting with 272d220fc1f8db444aed90ad954da1b4eaa277854731b4d0329f6de458233ee4 not found: ID does not 
exist" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.240746 3552 scope.go:117] "RemoveContainer" containerID="f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.241199 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f\": container with ID starting with f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f not found: ID does not exist" containerID="f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.241233 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f"} err="failed to get container status \"f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f\": rpc error: code = NotFound desc = could not find container \"f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f\": container with ID starting with f621daf3d2d4552d3ccead4334b6083a9f6e9ce0ef68864d5fbdc8b5db8b1c7f not found: ID does not exist" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.241248 3552 scope.go:117] "RemoveContainer" containerID="d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3" Mar 20 15:51:26 crc kubenswrapper[3552]: E0320 15:51:26.241667 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3\": container with ID starting with d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3 not found: ID does not exist" containerID="d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.241703 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3"} err="failed to get container status \"d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3\": rpc error: code = NotFound desc = could not find container \"d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3\": container with ID starting with d4a2e4a99dfd689f0c1da91656e9d042edde00a8b0141ce5b3b1dc9d5b6a8dc3 not found: ID does not exist" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313044 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-scripts\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313355 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-config-data\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313396 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313437 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313593 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313727 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-log-httpd\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313756 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-run-httpd\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.313807 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2rz2\" (UniqueName: \"kubernetes.io/projected/5c07b496-3a09-48d1-961a-159af89b9e22-kube-api-access-q2rz2\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.362203 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.415474 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-scripts\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.415924 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-config-data\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.416006 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.416849 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.416920 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.416990 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-log-httpd\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.417016 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-run-httpd\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.417069 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-q2rz2\" (UniqueName: \"kubernetes.io/projected/5c07b496-3a09-48d1-961a-159af89b9e22-kube-api-access-q2rz2\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.418576 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-log-httpd\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.418881 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-run-httpd\") pod \"ceilometer-0\" (UID: 
\"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.421089 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.422164 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-scripts\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.423152 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.424074 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.431526 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-config-data\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.464393 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2rz2\" (UniqueName: \"kubernetes.io/projected/5c07b496-3a09-48d1-961a-159af89b9e22-kube-api-access-q2rz2\") pod \"ceilometer-0\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") " pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.465438 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.518397 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-scripts\") pod \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.518596 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-combined-ca-bundle\") pod \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.518636 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpbd7\" (UniqueName: \"kubernetes.io/projected/e8cee03b-a095-46b4-aa33-ebb7498fdc43-kube-api-access-jpbd7\") pod \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.518678 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-config-data\") pod \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\" (UID: \"e8cee03b-a095-46b4-aa33-ebb7498fdc43\") " Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.523496 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8cee03b-a095-46b4-aa33-ebb7498fdc43-kube-api-access-jpbd7" (OuterVolumeSpecName: "kube-api-access-jpbd7") pod "e8cee03b-a095-46b4-aa33-ebb7498fdc43" (UID: "e8cee03b-a095-46b4-aa33-ebb7498fdc43"). InnerVolumeSpecName "kube-api-access-jpbd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.529556 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-scripts" (OuterVolumeSpecName: "scripts") pod "e8cee03b-a095-46b4-aa33-ebb7498fdc43" (UID: "e8cee03b-a095-46b4-aa33-ebb7498fdc43"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.557715 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-config-data" (OuterVolumeSpecName: "config-data") pod "e8cee03b-a095-46b4-aa33-ebb7498fdc43" (UID: "e8cee03b-a095-46b4-aa33-ebb7498fdc43"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.557779 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8cee03b-a095-46b4-aa33-ebb7498fdc43" (UID: "e8cee03b-a095-46b4-aa33-ebb7498fdc43"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.620908 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.621269 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.621289 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cee03b-a095-46b4-aa33-ebb7498fdc43-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.621308 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-jpbd7\" (UniqueName: \"kubernetes.io/projected/e8cee03b-a095-46b4-aa33-ebb7498fdc43-kube-api-access-jpbd7\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.670989 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Mar 20 15:51:26 crc kubenswrapper[3552]: I0320 15:51:26.926746 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.059869 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" event={"ID":"e8cee03b-a095-46b4-aa33-ebb7498fdc43","Type":"ContainerDied","Data":"280d30840e9bfc901e92ad5e9bee8a7641a79fdfc238b672e1b3f2796fe104bc"} Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.059900 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="280d30840e9bfc901e92ad5e9bee8a7641a79fdfc238b672e1b3f2796fe104bc" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.059941 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-kk9lv" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.062225 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gl4mx" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="registry-server" containerID="cri-o://639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623" gracePeriod=2 Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.062309 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerStarted","Data":"bfaeefa9431695856cab0c8bfe23e2e6bc5bea02196a0d7aab411c6d6934f911"} Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.142648 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.142834 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6dedb4ba-7f06-4600-bfc8-f9e8596fc013" podNamespace="openstack" podName="nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: E0320 15:51:27.143196 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e8cee03b-a095-46b4-aa33-ebb7498fdc43" containerName="nova-cell1-conductor-db-sync" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.143218 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8cee03b-a095-46b4-aa33-ebb7498fdc43" containerName="nova-cell1-conductor-db-sync" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.143484 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8cee03b-a095-46b4-aa33-ebb7498fdc43" containerName="nova-cell1-conductor-db-sync" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.144232 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.147999 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.152959 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.231608 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.231913 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlxz7\" (UniqueName: \"kubernetes.io/projected/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-kube-api-access-rlxz7\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.232175 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.334532 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.334647 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.334750 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rlxz7\" (UniqueName: \"kubernetes.io/projected/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-kube-api-access-rlxz7\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.339674 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.339818 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.353797 3552 
operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlxz7\" (UniqueName: \"kubernetes.io/projected/6dedb4ba-7f06-4600-bfc8-f9e8596fc013-kube-api-access-rlxz7\") pod \"nova-cell1-conductor-0\" (UID: \"6dedb4ba-7f06-4600-bfc8-f9e8596fc013\") " pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.463180 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02ab53ee-11af-4c70-8156-a7a5e378cd40" path="/var/lib/kubelet/pods/02ab53ee-11af-4c70-8156-a7a5e378cd40/volumes" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.481122 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.719496 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.848658 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7vn2\" (UniqueName: \"kubernetes.io/projected/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-kube-api-access-j7vn2\") pod \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.848904 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-utilities\") pod \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.849769 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-utilities" (OuterVolumeSpecName: "utilities") pod "043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" (UID: "043c27e8-cfbf-4d7e-85bf-505d7f7e1a98"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.849986 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-catalog-content\") pod \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\" (UID: \"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98\") " Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.853272 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-kube-api-access-j7vn2" (OuterVolumeSpecName: "kube-api-access-j7vn2") pod "043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" (UID: "043c27e8-cfbf-4d7e-85bf-505d7f7e1a98"). InnerVolumeSpecName "kube-api-access-j7vn2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.865627 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-j7vn2\" (UniqueName: \"kubernetes.io/projected/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-kube-api-access-j7vn2\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.865665 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:27 crc kubenswrapper[3552]: I0320 15:51:27.948853 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Mar 20 15:51:27 crc kubenswrapper[3552]: W0320 15:51:27.957265 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6dedb4ba_7f06_4600_bfc8_f9e8596fc013.slice/crio-4c974af7dbfc2a8b94be9a30f46e3e586d4d5541773c5cc8efc0fe2a6298617b WatchSource:0}: Error finding container 4c974af7dbfc2a8b94be9a30f46e3e586d4d5541773c5cc8efc0fe2a6298617b: Status 404 returned error can't find the container with id 4c974af7dbfc2a8b94be9a30f46e3e586d4d5541773c5cc8efc0fe2a6298617b Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.006917 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" (UID: "043c27e8-cfbf-4d7e-85bf-505d7f7e1a98"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.069055 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.073304 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerStarted","Data":"09f5368b7e1e33dbc9cb6bf3ee4574972cfb5cbd80ca6f3545400e77c764eb17"} Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.082538 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6dedb4ba-7f06-4600-bfc8-f9e8596fc013","Type":"ContainerStarted","Data":"4c974af7dbfc2a8b94be9a30f46e3e586d4d5541773c5cc8efc0fe2a6298617b"} Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.086618 3552 generic.go:334] "Generic (PLEG): container finished" podID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerID="639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623" exitCode=0 Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.086651 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerDied","Data":"639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623"} Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.086658 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl4mx" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.086685 3552 scope.go:117] "RemoveContainer" containerID="639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.086670 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl4mx" event={"ID":"043c27e8-cfbf-4d7e-85bf-505d7f7e1a98","Type":"ContainerDied","Data":"e67bef394b7dc2e7f2142af889c1d09d2ba3ed0d732a854ae634162b42701ee0"} Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.137178 3552 scope.go:117] "RemoveContainer" containerID="40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.161876 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl4mx"] Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.175226 3552 scope.go:117] "RemoveContainer" containerID="6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.176890 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl4mx"] Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.204519 3552 scope.go:117] "RemoveContainer" containerID="639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623" Mar 20 15:51:28 crc kubenswrapper[3552]: E0320 15:51:28.205085 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623\": container with ID starting with 639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623 not found: ID does not exist" containerID="639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.205152 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623"} err="failed to get container status \"639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623\": rpc error: code = NotFound desc = could not find container \"639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623\": container with ID starting with 639b4aad958a7009b471bb7d25f14404bacaf465c9f19c2fef2904ad6d83c623 not found: ID does not exist" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.205163 3552 scope.go:117] "RemoveContainer" containerID="40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a" Mar 20 15:51:28 crc kubenswrapper[3552]: E0320 15:51:28.205502 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a\": container with ID starting with 40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a not found: ID does not exist" containerID="40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.205530 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a"} err="failed to get container status \"40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a\": rpc error: code = NotFound desc = 
could not find container \"40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a\": container with ID starting with 40ae054614afdda421bb60949c6032f056feaa04b1a06f69dab925109fd3020a not found: ID does not exist" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.205539 3552 scope.go:117] "RemoveContainer" containerID="6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2" Mar 20 15:51:28 crc kubenswrapper[3552]: E0320 15:51:28.206136 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2\": container with ID starting with 6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2 not found: ID does not exist" containerID="6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2" Mar 20 15:51:28 crc kubenswrapper[3552]: I0320 15:51:28.206181 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2"} err="failed to get container status \"6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2\": rpc error: code = NotFound desc = could not find container \"6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2\": container with ID starting with 6ade1f103181fb0aff148936bcfa5df47dfbcd96c63e01707834e3875e254ce2 not found: ID does not exist" Mar 20 15:51:29 crc kubenswrapper[3552]: I0320 15:51:29.096581 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6dedb4ba-7f06-4600-bfc8-f9e8596fc013","Type":"ContainerStarted","Data":"eb22637f6d0bc92f34cc3268f02ed9c3e44e7a635d034633217bce2db0cd4237"} Mar 20 15:51:29 crc kubenswrapper[3552]: I0320 15:51:29.106448 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerStarted","Data":"411e92145b527676f13cfbe0f7b600c348d76faadb1cbc369a48189996d91054"} Mar 20 15:51:29 crc kubenswrapper[3552]: I0320 15:51:29.123715 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.123661113 podStartE2EDuration="2.123661113s" podCreationTimestamp="2026-03-20 15:51:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:29.113142205 +0000 UTC m=+1588.806839095" watchObservedRunningTime="2026-03-20 15:51:29.123661113 +0000 UTC m=+1588.817357943" Mar 20 15:51:29 crc kubenswrapper[3552]: I0320 15:51:29.442002 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" path="/var/lib/kubelet/pods/043c27e8-cfbf-4d7e-85bf-505d7f7e1a98/volumes" Mar 20 15:51:30 crc kubenswrapper[3552]: I0320 15:51:30.115785 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerStarted","Data":"4ef9a33b68b5ef59015e495964eb1fedf757e95eaaa347ba80e27e6b16f24fad"} Mar 20 15:51:30 crc kubenswrapper[3552]: I0320 15:51:30.116162 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:31 crc kubenswrapper[3552]: I0320 15:51:31.126998 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerStarted","Data":"40bcd0bb57d177057857baad05484b8d82e9862af802ee098ea24618aca92632"} Mar 20 15:51:31 crc kubenswrapper[3552]: I0320 15:51:31.152146 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.244795622 podStartE2EDuration="5.152098714s" podCreationTimestamp="2026-03-20 15:51:26 +0000 UTC" firstStartedPulling="2026-03-20 15:51:26.928875433 +0000 UTC m=+1586.622572263" lastFinishedPulling="2026-03-20 15:51:29.836178515 +0000 UTC m=+1589.529875355" observedRunningTime="2026-03-20 15:51:31.147958529 +0000 UTC m=+1590.841655389" watchObservedRunningTime="2026-03-20 15:51:31.152098714 +0000 UTC m=+1590.845795544" Mar 20 15:51:31 crc kubenswrapper[3552]: I0320 15:51:31.671227 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Mar 20 15:51:31 crc kubenswrapper[3552]: I0320 15:51:31.725894 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Mar 20 15:51:32 crc kubenswrapper[3552]: I0320 15:51:32.134729 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Mar 20 15:51:32 crc kubenswrapper[3552]: I0320 15:51:32.181915 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Mar 20 15:51:32 crc kubenswrapper[3552]: I0320 15:51:32.463342 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Mar 20 15:51:32 crc kubenswrapper[3552]: I0320 15:51:32.463389 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Mar 20 15:51:32 crc kubenswrapper[3552]: I0320 15:51:32.517599 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Mar 20 15:51:33 crc kubenswrapper[3552]: I0320 15:51:33.547700 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.219:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:51:33 crc kubenswrapper[3552]: I0320 15:51:33.547821 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.219:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Mar 20 15:51:37 crc kubenswrapper[3552]: I0320 15:51:37.536253 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Mar 20 15:51:40 crc kubenswrapper[3552]: I0320 15:51:40.462999 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Mar 20 15:51:40 crc kubenswrapper[3552]: I0320 15:51:40.463701 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.217956 3552 generic.go:334] "Generic (PLEG): container finished" podID="69a966a0-89a7-41a6-816e-83a53f7260b4" containerID="47a793258dd9e259b1cff53ba218c124c7f4374889e24e965e52a43b02010a47" exitCode=137 Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.218082 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-novncproxy-0" event={"ID":"69a966a0-89a7-41a6-816e-83a53f7260b4","Type":"ContainerDied","Data":"47a793258dd9e259b1cff53ba218c124c7f4374889e24e965e52a43b02010a47"} Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.220340 3552 generic.go:334] "Generic (PLEG): container finished" podID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerID="d9074cd70522c85ba4774595bb9c2ec7a3dfc17555f921873ee0b3a3766e590b" exitCode=137 Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.220370 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1","Type":"ContainerDied","Data":"d9074cd70522c85ba4774595bb9c2ec7a3dfc17555f921873ee0b3a3766e590b"} Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.245139 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.264753 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-config-data\") pod \"69a966a0-89a7-41a6-816e-83a53f7260b4\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.264966 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfgr7\" (UniqueName: \"kubernetes.io/projected/69a966a0-89a7-41a6-816e-83a53f7260b4-kube-api-access-kfgr7\") pod \"69a966a0-89a7-41a6-816e-83a53f7260b4\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.265090 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-combined-ca-bundle\") pod \"69a966a0-89a7-41a6-816e-83a53f7260b4\" (UID: \"69a966a0-89a7-41a6-816e-83a53f7260b4\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.268993 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.277135 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a966a0-89a7-41a6-816e-83a53f7260b4-kube-api-access-kfgr7" (OuterVolumeSpecName: "kube-api-access-kfgr7") pod "69a966a0-89a7-41a6-816e-83a53f7260b4" (UID: "69a966a0-89a7-41a6-816e-83a53f7260b4"). InnerVolumeSpecName "kube-api-access-kfgr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.322372 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-config-data" (OuterVolumeSpecName: "config-data") pod "69a966a0-89a7-41a6-816e-83a53f7260b4" (UID: "69a966a0-89a7-41a6-816e-83a53f7260b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.329104 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69a966a0-89a7-41a6-816e-83a53f7260b4" (UID: "69a966a0-89a7-41a6-816e-83a53f7260b4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367144 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdt77\" (UniqueName: \"kubernetes.io/projected/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-kube-api-access-zdt77\") pod \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367203 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-logs\") pod \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367350 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-combined-ca-bundle\") pod \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367423 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-config-data\") pod \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\" (UID: \"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1\") " Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367850 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367879 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69a966a0-89a7-41a6-816e-83a53f7260b4-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.367890 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kfgr7\" (UniqueName: \"kubernetes.io/projected/69a966a0-89a7-41a6-816e-83a53f7260b4-kube-api-access-kfgr7\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.368420 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-logs" (OuterVolumeSpecName: "logs") pod "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" (UID: "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.371164 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-kube-api-access-zdt77" (OuterVolumeSpecName: "kube-api-access-zdt77") pod "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" (UID: "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1"). InnerVolumeSpecName "kube-api-access-zdt77". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.402555 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-config-data" (OuterVolumeSpecName: "config-data") pod "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" (UID: "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.404863 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" (UID: "29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.468556 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.469568 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zdt77\" (UniqueName: \"kubernetes.io/projected/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-kube-api-access-zdt77\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.469601 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.469615 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.469628 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.470512 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Mar 20 15:51:42 crc kubenswrapper[3552]: I0320 15:51:42.472352 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.227819 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"69a966a0-89a7-41a6-816e-83a53f7260b4","Type":"ContainerDied","Data":"c6438d027d0450b226851fee45135348e8ebd472d3edce0f9a28d3d4cc77b8e3"} Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.227971 3552 scope.go:117] "RemoveContainer" containerID="47a793258dd9e259b1cff53ba218c124c7f4374889e24e965e52a43b02010a47" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.227839 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.229394 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1","Type":"ContainerDied","Data":"4c1651e43676cbf811fb37f24a9e23bbb4e62e2aa9d870d63adee0eab5af5073"} Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.229454 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.288217 3552 scope.go:117] "RemoveContainer" containerID="d9074cd70522c85ba4774595bb9c2ec7a3dfc17555f921873ee0b3a3766e590b" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.299663 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.314053 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.349909 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350125 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" podNamespace="openstack" podName="nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: E0320 15:51:43.350359 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-log" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350378 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-log" Mar 20 15:51:43 crc kubenswrapper[3552]: E0320 15:51:43.350396 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-metadata" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350425 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-metadata" Mar 20 15:51:43 crc kubenswrapper[3552]: E0320 15:51:43.350436 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="extract-utilities" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350443 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="extract-utilities" Mar 20 15:51:43 crc kubenswrapper[3552]: E0320 15:51:43.350462 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="69a966a0-89a7-41a6-816e-83a53f7260b4" containerName="nova-cell1-novncproxy-novncproxy" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350469 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a966a0-89a7-41a6-816e-83a53f7260b4" containerName="nova-cell1-novncproxy-novncproxy" Mar 20 15:51:43 crc kubenswrapper[3552]: E0320 15:51:43.350485 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="registry-server" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350491 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="registry-server" Mar 20 15:51:43 crc kubenswrapper[3552]: E0320 15:51:43.350505 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="extract-content" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350512 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="extract-content" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350699 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-metadata" Mar 
20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350716 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="043c27e8-cfbf-4d7e-85bf-505d7f7e1a98" containerName="registry-server" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350735 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a966a0-89a7-41a6-816e-83a53f7260b4" containerName="nova-cell1-novncproxy-novncproxy" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.350750 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" containerName="nova-metadata-log" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.351669 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.354345 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.354745 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.358795 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.367647 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.376237 3552 scope.go:117] "RemoveContainer" containerID="3e5bfd0c995152b0417ef5b67f5f25f5179ac2e8745dead2622cdeef39659234" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.376966 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.382864 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.382972 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-config-data\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.383126 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mggwn\" (UniqueName: \"kubernetes.io/projected/4965d23d-0756-438c-9c82-74ffcec602d5-kube-api-access-mggwn\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.383271 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4965d23d-0756-438c-9c82-74ffcec602d5-logs\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.383324 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.408825 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.409151 3552 topology_manager.go:215] "Topology Admit Handler" podUID="cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c" podNamespace="openstack" podName="nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.410631 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.415171 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.415382 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.415592 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.447154 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1" path="/var/lib/kubelet/pods/29f72ada-8b4f-43d7-a7a5-d2d9090f8cc1/volumes" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.447922 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69a966a0-89a7-41a6-816e-83a53f7260b4" path="/var/lib/kubelet/pods/69a966a0-89a7-41a6-816e-83a53f7260b4/volumes" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.448712 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485182 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485265 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mggwn\" (UniqueName: \"kubernetes.io/projected/4965d23d-0756-438c-9c82-74ffcec602d5-kube-api-access-mggwn\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485363 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485425 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4965d23d-0756-438c-9c82-74ffcec602d5-logs\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485516 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485545 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-599hn\" (UniqueName: \"kubernetes.io/projected/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-kube-api-access-599hn\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485699 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.485788 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.486037 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4965d23d-0756-438c-9c82-74ffcec602d5-logs\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.487534 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.487701 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-config-data\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.492249 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.494763 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.496855 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-config-data\") pod 
\"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.513555 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mggwn\" (UniqueName: \"kubernetes.io/projected/4965d23d-0756-438c-9c82-74ffcec602d5-kube-api-access-mggwn\") pod \"nova-metadata-0\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") " pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.589442 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.589494 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.589535 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.589574 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.589603 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-599hn\" (UniqueName: \"kubernetes.io/projected/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-kube-api-access-599hn\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.593569 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.593624 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.593770 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: 
I0320 15:51:43.594166 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.608749 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-599hn\" (UniqueName: \"kubernetes.io/projected/cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c-kube-api-access-599hn\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c\") " pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.685351 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Mar 20 15:51:43 crc kubenswrapper[3552]: I0320 15:51:43.791944 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:44 crc kubenswrapper[3552]: W0320 15:51:44.119512 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4965d23d_0756_438c_9c82_74ffcec602d5.slice/crio-f26d67ba787c2e79486393423cd5c9b911b30478f7e4a628411e788a104b330a WatchSource:0}: Error finding container f26d67ba787c2e79486393423cd5c9b911b30478f7e4a628411e788a104b330a: Status 404 returned error can't find the container with id f26d67ba787c2e79486393423cd5c9b911b30478f7e4a628411e788a104b330a Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.122599 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.243197 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4965d23d-0756-438c-9c82-74ffcec602d5","Type":"ContainerStarted","Data":"f26d67ba787c2e79486393423cd5c9b911b30478f7e4a628411e788a104b330a"} Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.250966 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.273481 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Mar 20 15:51:44 crc kubenswrapper[3552]: W0320 15:51:44.288825 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf9d9b6c_39f4_430a_a8ef_9b632d0bf96c.slice/crio-9eb7cbaf7faca92ab1e86b29f6bef08c98e482ad93848f4858645f9c112ad3a5 WatchSource:0}: Error finding container 9eb7cbaf7faca92ab1e86b29f6bef08c98e482ad93848f4858645f9c112ad3a5: Status 404 returned error can't find the container with id 9eb7cbaf7faca92ab1e86b29f6bef08c98e482ad93848f4858645f9c112ad3a5 Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.449328 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fc4f48d55-bzfz6"] Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.449574 3552 topology_manager.go:215] "Topology Admit Handler" podUID="39f1f950-065d-4899-a254-363f2279cde1" podNamespace="openstack" podName="dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.451003 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.464381 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fc4f48d55-bzfz6"] Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.505828 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-svc\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.505914 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-sb\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.506011 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfc8k\" (UniqueName: \"kubernetes.io/projected/39f1f950-065d-4899-a254-363f2279cde1-kube-api-access-hfc8k\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.506055 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-nb\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.506103 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-swift-storage-0\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.506205 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-config\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.612290 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-svc\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.612718 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-sb\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.612796 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hfc8k\" (UniqueName: \"kubernetes.io/projected/39f1f950-065d-4899-a254-363f2279cde1-kube-api-access-hfc8k\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.612918 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-nb\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.612963 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-swift-storage-0\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.613043 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-config\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.617517 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-svc\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.619688 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-sb\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.619719 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-nb\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.620108 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-swift-storage-0\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.626841 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-config\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.633743 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfc8k\" (UniqueName: 
\"kubernetes.io/projected/39f1f950-065d-4899-a254-363f2279cde1-kube-api-access-hfc8k\") pod \"dnsmasq-dns-7fc4f48d55-bzfz6\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") " pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:44 crc kubenswrapper[3552]: I0320 15:51:44.643133 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:45 crc kubenswrapper[3552]: W0320 15:51:45.154176 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39f1f950_065d_4899_a254_363f2279cde1.slice/crio-14e30c3577ce27a3b5705457ac20b6103fa847793b740a96d3954a084f1c39e1 WatchSource:0}: Error finding container 14e30c3577ce27a3b5705457ac20b6103fa847793b740a96d3954a084f1c39e1: Status 404 returned error can't find the container with id 14e30c3577ce27a3b5705457ac20b6103fa847793b740a96d3954a084f1c39e1 Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.155108 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fc4f48d55-bzfz6"] Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.264498 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4965d23d-0756-438c-9c82-74ffcec602d5","Type":"ContainerStarted","Data":"939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222"} Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.264546 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4965d23d-0756-438c-9c82-74ffcec602d5","Type":"ContainerStarted","Data":"ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57"} Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.274257 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c","Type":"ContainerStarted","Data":"aca6a3a9d05af1a919342dc1e6c34fbfc84cfc37e0e04329cff46cc3dccb3c9f"} Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.274291 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c","Type":"ContainerStarted","Data":"9eb7cbaf7faca92ab1e86b29f6bef08c98e482ad93848f4858645f9c112ad3a5"} Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.277307 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" event={"ID":"39f1f950-065d-4899-a254-363f2279cde1","Type":"ContainerStarted","Data":"14e30c3577ce27a3b5705457ac20b6103fa847793b740a96d3954a084f1c39e1"} Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.297659 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.297619816 podStartE2EDuration="2.297619816s" podCreationTimestamp="2026-03-20 15:51:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:45.28992968 +0000 UTC m=+1604.983626510" watchObservedRunningTime="2026-03-20 15:51:45.297619816 +0000 UTC m=+1604.991316646" Mar 20 15:51:45 crc kubenswrapper[3552]: I0320 15:51:45.317891 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.31784642 podStartE2EDuration="2.31784642s" podCreationTimestamp="2026-03-20 15:51:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:45.313897539 +0000 UTC m=+1605.007594379" watchObservedRunningTime="2026-03-20 15:51:45.31784642 +0000 UTC m=+1605.011543250" Mar 20 15:51:46 crc kubenswrapper[3552]: I0320 15:51:46.589747 3552 generic.go:334] "Generic (PLEG): container finished" podID="39f1f950-065d-4899-a254-363f2279cde1" containerID="db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee" exitCode=0 Mar 20 15:51:46 crc kubenswrapper[3552]: I0320 15:51:46.592624 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" event={"ID":"39f1f950-065d-4899-a254-363f2279cde1","Type":"ContainerDied","Data":"db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee"} Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.283771 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.284333 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-log" containerID="cri-o://265d20eac124d681339843cdb9a3d8ef18933c1d5853cebfa9a467660b4ae62e" gracePeriod=30 Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.284723 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-api" containerID="cri-o://6d3bafba21b26aa9210a016fa3c8c5584592257bbe9371f6708f2421ff549429" gracePeriod=30 Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.608044 3552 generic.go:334] "Generic (PLEG): container finished" podID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerID="265d20eac124d681339843cdb9a3d8ef18933c1d5853cebfa9a467660b4ae62e" exitCode=143 Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.608125 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"363614e4-b836-4c3f-b5ff-d27f9970b8b3","Type":"ContainerDied","Data":"265d20eac124d681339843cdb9a3d8ef18933c1d5853cebfa9a467660b4ae62e"} Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.610478 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" event={"ID":"39f1f950-065d-4899-a254-363f2279cde1","Type":"ContainerStarted","Data":"02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9"} Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.638302 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" podStartSLOduration=3.638249923 podStartE2EDuration="3.638249923s" podCreationTimestamp="2026-03-20 15:51:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:47.63460827 +0000 UTC m=+1607.328305130" watchObservedRunningTime="2026-03-20 15:51:47.638249923 +0000 UTC m=+1607.331946753" Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.833930 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.834942 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-central-agent" containerID="cri-o://09f5368b7e1e33dbc9cb6bf3ee4574972cfb5cbd80ca6f3545400e77c764eb17" gracePeriod=30 
Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.835083 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="proxy-httpd" containerID="cri-o://40bcd0bb57d177057857baad05484b8d82e9862af802ee098ea24618aca92632" gracePeriod=30
Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.835133 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="sg-core" containerID="cri-o://4ef9a33b68b5ef59015e495964eb1fedf757e95eaaa347ba80e27e6b16f24fad" gracePeriod=30
Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.835227 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-notification-agent" containerID="cri-o://411e92145b527676f13cfbe0f7b600c348d76faadb1cbc369a48189996d91054" gracePeriod=30
Mar 20 15:51:47 crc kubenswrapper[3552]: I0320 15:51:47.849893 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.623844 3552 generic.go:334] "Generic (PLEG): container finished" podID="5c07b496-3a09-48d1-961a-159af89b9e22" containerID="40bcd0bb57d177057857baad05484b8d82e9862af802ee098ea24618aca92632" exitCode=0
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.623877 3552 generic.go:334] "Generic (PLEG): container finished" podID="5c07b496-3a09-48d1-961a-159af89b9e22" containerID="4ef9a33b68b5ef59015e495964eb1fedf757e95eaaa347ba80e27e6b16f24fad" exitCode=2
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.623888 3552 generic.go:334] "Generic (PLEG): container finished" podID="5c07b496-3a09-48d1-961a-159af89b9e22" containerID="411e92145b527676f13cfbe0f7b600c348d76faadb1cbc369a48189996d91054" exitCode=0
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.623899 3552 generic.go:334] "Generic (PLEG): container finished" podID="5c07b496-3a09-48d1-961a-159af89b9e22" containerID="09f5368b7e1e33dbc9cb6bf3ee4574972cfb5cbd80ca6f3545400e77c764eb17" exitCode=0
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.625154 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerDied","Data":"40bcd0bb57d177057857baad05484b8d82e9862af802ee098ea24618aca92632"}
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.625187 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6"
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.625202 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerDied","Data":"4ef9a33b68b5ef59015e495964eb1fedf757e95eaaa347ba80e27e6b16f24fad"}
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.625213 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerDied","Data":"411e92145b527676f13cfbe0f7b600c348d76faadb1cbc369a48189996d91054"}
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.625226 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerDied","Data":"09f5368b7e1e33dbc9cb6bf3ee4574972cfb5cbd80ca6f3545400e77c764eb17"}
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.792310 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Mar 20 15:51:48 crc kubenswrapper[3552]: I0320 15:51:48.910852 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008412 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-log-httpd\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008510 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2rz2\" (UniqueName: \"kubernetes.io/projected/5c07b496-3a09-48d1-961a-159af89b9e22-kube-api-access-q2rz2\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008587 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-scripts\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008639 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-config-data\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008708 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-run-httpd\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008750 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-ceilometer-tls-certs\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008785 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-sg-core-conf-yaml\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.008807 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-combined-ca-bundle\") pod \"5c07b496-3a09-48d1-961a-159af89b9e22\" (UID: \"5c07b496-3a09-48d1-961a-159af89b9e22\") "
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.010026 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.011746 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.016323 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-scripts" (OuterVolumeSpecName: "scripts") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.017195 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c07b496-3a09-48d1-961a-159af89b9e22-kube-api-access-q2rz2" (OuterVolumeSpecName: "kube-api-access-q2rz2") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "kube-api-access-q2rz2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.042692 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.111120 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-q2rz2\" (UniqueName: \"kubernetes.io/projected/5c07b496-3a09-48d1-961a-159af89b9e22-kube-api-access-q2rz2\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.111492 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.111504 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-run-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.111513 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.111523 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c07b496-3a09-48d1-961a-159af89b9e22-log-httpd\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.130201 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.147435 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.162668 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-config-data" (OuterVolumeSpecName: "config-data") pod "5c07b496-3a09-48d1-961a-159af89b9e22" (UID: "5c07b496-3a09-48d1-961a-159af89b9e22"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.213940 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.213980 3552 reconciler_common.go:300] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.213994 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c07b496-3a09-48d1-961a-159af89b9e22-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.633662 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c07b496-3a09-48d1-961a-159af89b9e22","Type":"ContainerDied","Data":"bfaeefa9431695856cab0c8bfe23e2e6bc5bea02196a0d7aab411c6d6934f911"}
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.633677 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.633717 3552 scope.go:117] "RemoveContainer" containerID="40bcd0bb57d177057857baad05484b8d82e9862af802ee098ea24618aca92632"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.674138 3552 scope.go:117] "RemoveContainer" containerID="4ef9a33b68b5ef59015e495964eb1fedf757e95eaaa347ba80e27e6b16f24fad"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.676853 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.686910 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.701362 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.702029 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d988c69e-f7af-4e98-9af6-76f179bbeb03" podNamespace="openstack" podName="ceilometer-0"
Mar 20 15:51:49 crc kubenswrapper[3552]: E0320 15:51:49.702444 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="proxy-httpd"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.702574 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="proxy-httpd"
Mar 20 15:51:49 crc kubenswrapper[3552]: E0320 15:51:49.702752 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="sg-core"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.702925 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="sg-core"
Mar 20 15:51:49 crc kubenswrapper[3552]: E0320 15:51:49.703027 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-notification-agent"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.703111 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-notification-agent"
Mar 20 15:51:49 crc kubenswrapper[3552]: E0320 15:51:49.703206 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-central-agent"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.703367 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-central-agent"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.703734 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="sg-core"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.703838 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="proxy-httpd"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.703930 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-notification-agent"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.704010 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" containerName="ceilometer-central-agent"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.707758 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.711213 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.711597 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.711863 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.716113 3552 scope.go:117] "RemoveContainer" containerID="411e92145b527676f13cfbe0f7b600c348d76faadb1cbc369a48189996d91054"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.723251 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.750345 3552 scope.go:117] "RemoveContainer" containerID="09f5368b7e1e33dbc9cb6bf3ee4574972cfb5cbd80ca6f3545400e77c764eb17"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.825888 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78jnz\" (UniqueName: \"kubernetes.io/projected/d988c69e-f7af-4e98-9af6-76f179bbeb03-kube-api-access-78jnz\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.826073 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0"
Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.826238 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-config-data\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0"
Mar 20 15:51:49 crc
kubenswrapper[3552]: I0320 15:51:49.826295 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-log-httpd\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.826470 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.826562 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-run-httpd\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.826723 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-scripts\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.827042 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.837680 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:49 crc kubenswrapper[3552]: E0320 15:51:49.838375 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="unmounted volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-78jnz log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="d988c69e-f7af-4e98-9af6-76f179bbeb03" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929666 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-78jnz\" (UniqueName: \"kubernetes.io/projected/d988c69e-f7af-4e98-9af6-76f179bbeb03-kube-api-access-78jnz\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929746 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929787 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-config-data\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929825 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-log-httpd\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929909 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929956 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-run-httpd\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.929997 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-scripts\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.930033 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.930332 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-log-httpd\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.930450 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-run-httpd\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.940607 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.940736 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-scripts\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.940996 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-config-data\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.944675 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.948071 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:49 crc kubenswrapper[3552]: I0320 15:51:49.951731 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-78jnz\" (UniqueName: \"kubernetes.io/projected/d988c69e-f7af-4e98-9af6-76f179bbeb03-kube-api-access-78jnz\") pod \"ceilometer-0\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " pod="openstack/ceilometer-0" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.650662 3552 generic.go:334] "Generic (PLEG): container finished" podID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerID="6d3bafba21b26aa9210a016fa3c8c5584592257bbe9371f6708f2421ff549429" exitCode=0 Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.651019 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.651625 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"363614e4-b836-4c3f-b5ff-d27f9970b8b3","Type":"ContainerDied","Data":"6d3bafba21b26aa9210a016fa3c8c5584592257bbe9371f6708f2421ff549429"} Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.669456 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.743923 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-scripts\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744288 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-sg-core-conf-yaml\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744326 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-log-httpd\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744379 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-combined-ca-bundle\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744428 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-run-httpd\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: 
\"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744453 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-config-data\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744486 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-ceilometer-tls-certs\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.744522 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78jnz\" (UniqueName: \"kubernetes.io/projected/d988c69e-f7af-4e98-9af6-76f179bbeb03-kube-api-access-78jnz\") pod \"d988c69e-f7af-4e98-9af6-76f179bbeb03\" (UID: \"d988c69e-f7af-4e98-9af6-76f179bbeb03\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.745026 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.745214 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.749651 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.749864 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-config-data" (OuterVolumeSpecName: "config-data") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.749971 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.749990 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.750519 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d988c69e-f7af-4e98-9af6-76f179bbeb03-kube-api-access-78jnz" (OuterVolumeSpecName: "kube-api-access-78jnz") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "kube-api-access-78jnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.763612 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-scripts" (OuterVolumeSpecName: "scripts") pod "d988c69e-f7af-4e98-9af6-76f179bbeb03" (UID: "d988c69e-f7af-4e98-9af6-76f179bbeb03"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846872 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-scripts\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846911 3552 reconciler_common.go:300] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846925 3552 reconciler_common.go:300] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-log-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846938 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846950 3552 reconciler_common.go:300] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d988c69e-f7af-4e98-9af6-76f179bbeb03-run-httpd\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846961 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846972 3552 reconciler_common.go:300] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d988c69e-f7af-4e98-9af6-76f179bbeb03-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.846986 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-78jnz\" (UniqueName: \"kubernetes.io/projected/d988c69e-f7af-4e98-9af6-76f179bbeb03-kube-api-access-78jnz\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 
15:51:50.877425 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.947863 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9tmm\" (UniqueName: \"kubernetes.io/projected/363614e4-b836-4c3f-b5ff-d27f9970b8b3-kube-api-access-s9tmm\") pod \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.948124 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-config-data\") pod \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.948176 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-combined-ca-bundle\") pod \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.948239 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/363614e4-b836-4c3f-b5ff-d27f9970b8b3-logs\") pod \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\" (UID: \"363614e4-b836-4c3f-b5ff-d27f9970b8b3\") " Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.949171 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/363614e4-b836-4c3f-b5ff-d27f9970b8b3-logs" (OuterVolumeSpecName: "logs") pod "363614e4-b836-4c3f-b5ff-d27f9970b8b3" (UID: "363614e4-b836-4c3f-b5ff-d27f9970b8b3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:51:50 crc kubenswrapper[3552]: I0320 15:51:50.958202 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/363614e4-b836-4c3f-b5ff-d27f9970b8b3-kube-api-access-s9tmm" (OuterVolumeSpecName: "kube-api-access-s9tmm") pod "363614e4-b836-4c3f-b5ff-d27f9970b8b3" (UID: "363614e4-b836-4c3f-b5ff-d27f9970b8b3"). InnerVolumeSpecName "kube-api-access-s9tmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.010638 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "363614e4-b836-4c3f-b5ff-d27f9970b8b3" (UID: "363614e4-b836-4c3f-b5ff-d27f9970b8b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.011582 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-config-data" (OuterVolumeSpecName: "config-data") pod "363614e4-b836-4c3f-b5ff-d27f9970b8b3" (UID: "363614e4-b836-4c3f-b5ff-d27f9970b8b3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.052060 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/363614e4-b836-4c3f-b5ff-d27f9970b8b3-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.052087 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-s9tmm\" (UniqueName: \"kubernetes.io/projected/363614e4-b836-4c3f-b5ff-d27f9970b8b3-kube-api-access-s9tmm\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.052098 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.052108 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/363614e4-b836-4c3f-b5ff-d27f9970b8b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.455968 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c07b496-3a09-48d1-961a-159af89b9e22" path="/var/lib/kubelet/pods/5c07b496-3a09-48d1-961a-159af89b9e22/volumes" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.664863 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.664886 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"363614e4-b836-4c3f-b5ff-d27f9970b8b3","Type":"ContainerDied","Data":"dfd3d5901717c15ca2eff22d01d7c1acec62ec63b6d531fd76cc5676b15f3cb1"} Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.664903 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.664944 3552 scope.go:117] "RemoveContainer" containerID="6d3bafba21b26aa9210a016fa3c8c5584592257bbe9371f6708f2421ff549429" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.733871 3552 scope.go:117] "RemoveContainer" containerID="265d20eac124d681339843cdb9a3d8ef18933c1d5853cebfa9a467660b4ae62e" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.762045 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.776469 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.789215 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.808864 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.822708 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.822911 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1" podNamespace="openstack" podName="ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: E0320 15:51:51.823162 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-api" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.823173 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-api" Mar 20 15:51:51 crc kubenswrapper[3552]: E0320 15:51:51.823187 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-log" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.823193 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-log" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.823376 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-api" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.823428 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" containerName="nova-api-log" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.825568 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.828819 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.828891 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.828962 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.833050 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.846043 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.846285 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" podNamespace="openstack" podName="nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.847933 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.855930 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.856122 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.856629 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.859354 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872238 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r59sm\" (UniqueName: \"kubernetes.io/projected/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-kube-api-access-r59sm\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872291 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-logs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872455 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-config-data\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872557 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-public-tls-certs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872584 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkr9b\" (UniqueName: 
\"kubernetes.io/projected/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-kube-api-access-pkr9b\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872665 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872695 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-config-data\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872747 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-run-httpd\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872781 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-log-httpd\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872813 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872859 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872914 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.872933 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.873011 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-scripts\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 
15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975214 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975344 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-scripts\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-r59sm\" (UniqueName: \"kubernetes.io/projected/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-kube-api-access-r59sm\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975468 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-logs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975498 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-config-data\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975538 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-public-tls-certs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975569 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-pkr9b\" (UniqueName: \"kubernetes.io/projected/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-kube-api-access-pkr9b\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975612 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975643 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-config-data\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc 
kubenswrapper[3552]: I0320 15:51:51.975678 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-run-httpd\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975718 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-log-httpd\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975756 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.975786 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.976652 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-run-httpd\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.976768 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-log-httpd\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.979583 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.979862 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-scripts\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.981815 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.982671 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-logs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.983050 3552 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.983774 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-config-data\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.984825 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-public-tls-certs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.985297 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.995150 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.996703 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkr9b\" (UniqueName: \"kubernetes.io/projected/ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1-kube-api-access-pkr9b\") pod \"ceilometer-0\" (UID: \"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1\") " pod="openstack/ceilometer-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.997537 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-r59sm\" (UniqueName: \"kubernetes.io/projected/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-kube-api-access-r59sm\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:51 crc kubenswrapper[3552]: I0320 15:51:51.997988 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-config-data\") pod \"nova-api-0\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") " pod="openstack/nova-api-0" Mar 20 15:51:52 crc kubenswrapper[3552]: I0320 15:51:52.141260 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Mar 20 15:51:52 crc kubenswrapper[3552]: I0320 15:51:52.167014 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:51:52 crc kubenswrapper[3552]: I0320 15:51:52.608084 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Mar 20 15:51:52 crc kubenswrapper[3552]: I0320 15:51:52.668154 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:51:52 crc kubenswrapper[3552]: I0320 15:51:52.682463 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35","Type":"ContainerStarted","Data":"24e0a89142579c716dca89cedf903b338da1ec3869fbbee8303567a60cc3505e"} Mar 20 15:51:52 crc kubenswrapper[3552]: I0320 15:51:52.684246 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1","Type":"ContainerStarted","Data":"ba12939892321351f08c53cd0a3d512dbf3b2ab12320c0f7a13dd69dada3e6f5"} Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.454620 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="363614e4-b836-4c3f-b5ff-d27f9970b8b3" path="/var/lib/kubelet/pods/363614e4-b836-4c3f-b5ff-d27f9970b8b3/volumes" Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.455720 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d988c69e-f7af-4e98-9af6-76f179bbeb03" path="/var/lib/kubelet/pods/d988c69e-f7af-4e98-9af6-76f179bbeb03/volumes" Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.686053 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.686532 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.693742 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1","Type":"ContainerStarted","Data":"05c2a4a58409548f3448cdbbf8b126af070c4716e3a56563eef8d85d93bc549b"} Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.695525 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35","Type":"ContainerStarted","Data":"8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7"} Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.695581 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35","Type":"ContainerStarted","Data":"b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2"} Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.722070 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.722023518 podStartE2EDuration="2.722023518s" podCreationTimestamp="2026-03-20 15:51:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:53.713802489 +0000 UTC m=+1613.407499319" watchObservedRunningTime="2026-03-20 15:51:53.722023518 +0000 UTC m=+1613.415720338" Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.793392 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:53 crc kubenswrapper[3552]: I0320 15:51:53.813033 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.645295 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.698619 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.223:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.698640 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-855c567469-gnq4t"] Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.698838 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-855c567469-gnq4t" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerName="dnsmasq-dns" containerID="cri-o://2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9" gracePeriod=10 Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.698634 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.223:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.708260 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1","Type":"ContainerStarted","Data":"42e91ad6d9deb17906a04eb81076d2a2ead7392d57370402fa37d0717ffd90f7"} Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.708291 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1","Type":"ContainerStarted","Data":"0fcdffc40295d1fdfb33d8281eff463be00863b07f4448241e85efbd8ca20159"} Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.743767 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.952299 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-6zdxq"] Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.952732 3552 topology_manager.go:215] "Topology Admit Handler" podUID="88c7817a-b4f0-4685-a348-8cf8a662632b" podNamespace="openstack" podName="nova-cell1-cell-mapping-6zdxq" Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.953772 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.963912 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.964534 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Mar 20 15:51:54 crc kubenswrapper[3552]: I0320 15:51:54.994602 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6zdxq"]
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.141650 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-scripts\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.141774 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-config-data\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.141832 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8ghk\" (UniqueName: \"kubernetes.io/projected/88c7817a-b4f0-4685-a348-8cf8a662632b-kube-api-access-g8ghk\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.141868 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.243432 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-config-data\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.243512 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-g8ghk\" (UniqueName: \"kubernetes.io/projected/88c7817a-b4f0-4685-a348-8cf8a662632b-kube-api-access-g8ghk\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.243565 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.243658 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-scripts\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.338286 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8ghk\" (UniqueName: \"kubernetes.io/projected/88c7817a-b4f0-4685-a348-8cf8a662632b-kube-api-access-g8ghk\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.338308 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.338379 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-scripts\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.338432 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-config-data\") pod \"nova-cell1-cell-mapping-6zdxq\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") " pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.502428 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-855c567469-gnq4t"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.610299 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.671957 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dd98v\" (UniqueName: \"kubernetes.io/projected/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-kube-api-access-dd98v\") pod \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") "
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.672361 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-config\") pod \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") "
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.672397 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-sb\") pod \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") "
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.672448 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-swift-storage-0\") pod \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") "
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.672479 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-nb\") pod \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") "
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.672514 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-svc\") pod \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\" (UID: \"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d\") "
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.680145 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-kube-api-access-dd98v" (OuterVolumeSpecName: "kube-api-access-dd98v") pod "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" (UID: "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d"). InnerVolumeSpecName "kube-api-access-dd98v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.734099 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1","Type":"ContainerStarted","Data":"db11f816ca4cdb3301d02954b53d5331fbaa737c117a1a289ef5869370463c07"}
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.741188 3552 generic.go:334] "Generic (PLEG): container finished" podID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerID="2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9" exitCode=0
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.742256 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" (UID: "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.742310 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-855c567469-gnq4t"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.742312 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-855c567469-gnq4t" event={"ID":"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d","Type":"ContainerDied","Data":"2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9"}
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.742488 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-855c567469-gnq4t" event={"ID":"a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d","Type":"ContainerDied","Data":"e31cb748a6126538b1a741e6cdf4a92636a99ac78fb2d7f446e0cd21b842f613"}
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.742507 3552 scope.go:117] "RemoveContainer" containerID="2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.765801 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" (UID: "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.775515 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" (UID: "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.777004 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-config" (OuterVolumeSpecName: "config") pod "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" (UID: "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.777092 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-dd98v\" (UniqueName: \"kubernetes.io/projected/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-kube-api-access-dd98v\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.777126 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.777146 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.777165 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-dns-svc\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.811127 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" (UID: "a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.834079 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.623602947 podStartE2EDuration="4.834032475s" podCreationTimestamp="2026-03-20 15:51:51 +0000 UTC" firstStartedPulling="2026-03-20 15:51:52.611598292 +0000 UTC m=+1612.305295122" lastFinishedPulling="2026-03-20 15:51:54.82202782 +0000 UTC m=+1614.515724650" observedRunningTime="2026-03-20 15:51:55.778738259 +0000 UTC m=+1615.472435109" watchObservedRunningTime="2026-03-20 15:51:55.834032475 +0000 UTC m=+1615.527729305"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.881310 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-config\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.881684 3552 scope.go:117] "RemoveContainer" containerID="63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.882324 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.945664 3552 scope.go:117] "RemoveContainer" containerID="2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9"
Mar 20 15:51:55 crc kubenswrapper[3552]: E0320 15:51:55.946884 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9\": container with ID starting with 2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9 not found: ID does not exist" containerID="2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.946934 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9"} err="failed to get container status \"2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9\": rpc error: code = NotFound desc = could not find container \"2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9\": container with ID starting with 2d9805d2ea2969cb90880bd2840d2ea6a1732c5b5f69c649c0de5c13a58a5dc9 not found: ID does not exist"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.946950 3552 scope.go:117] "RemoveContainer" containerID="63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d"
Mar 20 15:51:55 crc kubenswrapper[3552]: E0320 15:51:55.950649 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d\": container with ID starting with 63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d not found: ID does not exist" containerID="63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d"
Mar 20 15:51:55 crc kubenswrapper[3552]: I0320 15:51:55.950694 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d"} err="failed to get container status \"63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d\": rpc error: code = NotFound desc = could not find container \"63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d\": container with ID starting with 63aa1ab0ec4325cb7743a002a8986acdb5f35ddcb3256d0d7e98cafcfba31b5d not found: ID does not exist"
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.079216 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-855c567469-gnq4t"]
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.087867 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-855c567469-gnq4t"]
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.144975 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6zdxq"]
Mar 20 15:51:56 crc kubenswrapper[3552]: W0320 15:51:56.152248 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88c7817a_b4f0_4685_a348_8cf8a662632b.slice/crio-9277ad990850ad07d5bbdcd1a23c1760b09b5de6d07c7711747d07dd9d16c030 WatchSource:0}: Error finding container 9277ad990850ad07d5bbdcd1a23c1760b09b5de6d07c7711747d07dd9d16c030: Status 404 returned error can't find the container with id 9277ad990850ad07d5bbdcd1a23c1760b09b5de6d07c7711747d07dd9d16c030
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.754252 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6zdxq" event={"ID":"88c7817a-b4f0-4685-a348-8cf8a662632b","Type":"ContainerStarted","Data":"351b090c26880b314b89750cf13da50d0b8ae5e108482d11f8545dfbc54860c7"}
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.754629 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.754647 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6zdxq" event={"ID":"88c7817a-b4f0-4685-a348-8cf8a662632b","Type":"ContainerStarted","Data":"9277ad990850ad07d5bbdcd1a23c1760b09b5de6d07c7711747d07dd9d16c030"}
Mar 20 15:51:56 crc kubenswrapper[3552]: I0320 15:51:56.770897 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-6zdxq" podStartSLOduration=2.7708379770000002 podStartE2EDuration="2.770837977s" podCreationTimestamp="2026-03-20 15:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:51:56.76977319 +0000 UTC m=+1616.463470030" watchObservedRunningTime="2026-03-20 15:51:56.770837977 +0000 UTC m=+1616.464534807"
Mar 20 15:51:57 crc kubenswrapper[3552]: I0320 15:51:57.441833 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" path="/var/lib/kubelet/pods/a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d/volumes"
Mar 20 15:52:00 crc kubenswrapper[3552]: I0320 15:52:00.789084 3552 generic.go:334] "Generic (PLEG): container finished" podID="88c7817a-b4f0-4685-a348-8cf8a662632b" containerID="351b090c26880b314b89750cf13da50d0b8ae5e108482d11f8545dfbc54860c7" exitCode=0
Mar 20 15:52:00 crc kubenswrapper[3552]: I0320 15:52:00.789167 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6zdxq" event={"ID":"88c7817a-b4f0-4685-a348-8cf8a662632b","Type":"ContainerDied","Data":"351b090c26880b314b89750cf13da50d0b8ae5e108482d11f8545dfbc54860c7"}
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.319947 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.320058 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.320134 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.320169 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.320252 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.685526 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Mar 20 15:52:01 crc kubenswrapper[3552]: I0320 15:52:01.685940 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.156687 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.168178 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.168230 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.321229 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-config-data\") pod \"88c7817a-b4f0-4685-a348-8cf8a662632b\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") "
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.321502 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8ghk\" (UniqueName: \"kubernetes.io/projected/88c7817a-b4f0-4685-a348-8cf8a662632b-kube-api-access-g8ghk\") pod \"88c7817a-b4f0-4685-a348-8cf8a662632b\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") "
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.321541 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-scripts\") pod \"88c7817a-b4f0-4685-a348-8cf8a662632b\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") "
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.321653 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-combined-ca-bundle\") pod \"88c7817a-b4f0-4685-a348-8cf8a662632b\" (UID: \"88c7817a-b4f0-4685-a348-8cf8a662632b\") "
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.327416 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88c7817a-b4f0-4685-a348-8cf8a662632b-kube-api-access-g8ghk" (OuterVolumeSpecName: "kube-api-access-g8ghk") pod "88c7817a-b4f0-4685-a348-8cf8a662632b" (UID: "88c7817a-b4f0-4685-a348-8cf8a662632b"). InnerVolumeSpecName "kube-api-access-g8ghk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.337035 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-scripts" (OuterVolumeSpecName: "scripts") pod "88c7817a-b4f0-4685-a348-8cf8a662632b" (UID: "88c7817a-b4f0-4685-a348-8cf8a662632b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.358500 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88c7817a-b4f0-4685-a348-8cf8a662632b" (UID: "88c7817a-b4f0-4685-a348-8cf8a662632b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.369175 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-config-data" (OuterVolumeSpecName: "config-data") pod "88c7817a-b4f0-4685-a348-8cf8a662632b" (UID: "88c7817a-b4f0-4685-a348-8cf8a662632b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.423662 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.423700 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.423712 3552 reconciler_common.go:300] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88c7817a-b4f0-4685-a348-8cf8a662632b-scripts\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.423721 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-g8ghk\" (UniqueName: \"kubernetes.io/projected/88c7817a-b4f0-4685-a348-8cf8a662632b-kube-api-access-g8ghk\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.814449 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6zdxq" event={"ID":"88c7817a-b4f0-4685-a348-8cf8a662632b","Type":"ContainerDied","Data":"9277ad990850ad07d5bbdcd1a23c1760b09b5de6d07c7711747d07dd9d16c030"}
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.814486 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9277ad990850ad07d5bbdcd1a23c1760b09b5de6d07c7711747d07dd9d16c030"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.814585 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6zdxq"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.934734 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.934942 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-log" containerID="cri-o://b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2" gracePeriod=30
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.935328 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-api" containerID="cri-o://8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7" gracePeriod=30
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.952538 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.228:8774/\": EOF"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.954216 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.228:8774/\": EOF"
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.960417 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.960598 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" containerName="nova-scheduler-scheduler" containerID="cri-o://fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee" gracePeriod=30
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.974394 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.974608 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-log" containerID="cri-o://ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57" gracePeriod=30
Mar 20 15:52:02 crc kubenswrapper[3552]: I0320 15:52:02.975106 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-metadata" containerID="cri-o://939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222" gracePeriod=30
Mar 20 15:52:03 crc kubenswrapper[3552]: E0320 15:52:03.200248 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4965d23d_0756_438c_9c82_74ffcec602d5.slice/crio-conmon-ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8f1d47d_5cfb_4340_b8bd_22da11a5fb35.slice/crio-conmon-b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8f1d47d_5cfb_4340_b8bd_22da11a5fb35.slice/crio-b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2.scope\": RecentStats: unable to find data in memory cache]"
Mar 20 15:52:03 crc kubenswrapper[3552]: I0320 15:52:03.825430 3552 generic.go:334] "Generic (PLEG): container finished" podID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerID="b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2" exitCode=143
Mar 20 15:52:03 crc kubenswrapper[3552]: I0320 15:52:03.825472 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35","Type":"ContainerDied","Data":"b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2"}
Mar 20 15:52:03 crc kubenswrapper[3552]: I0320 15:52:03.827934 3552 generic.go:334] "Generic (PLEG): container finished" podID="4965d23d-0756-438c-9c82-74ffcec602d5" containerID="ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57" exitCode=143
Mar 20 15:52:03 crc kubenswrapper[3552]: I0320 15:52:03.827972 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4965d23d-0756-438c-9c82-74ffcec602d5","Type":"ContainerDied","Data":"ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57"}
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.671245 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee is running failed: container process not found" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.672128 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee is running failed: container process not found" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.672339 3552 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee is running failed: container process not found" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.672369 3552 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" containerName="nova-scheduler-scheduler"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.673433 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.680001 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820328 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-config-data\") pod \"4965d23d-0756-438c-9c82-74ffcec602d5\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820493 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-combined-ca-bundle\") pod \"305d791f-0105-4106-913f-d1d945d0b1f9\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820555 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-combined-ca-bundle\") pod \"4965d23d-0756-438c-9c82-74ffcec602d5\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820710 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-config-data\") pod \"305d791f-0105-4106-913f-d1d945d0b1f9\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820749 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mggwn\" (UniqueName: \"kubernetes.io/projected/4965d23d-0756-438c-9c82-74ffcec602d5-kube-api-access-mggwn\") pod \"4965d23d-0756-438c-9c82-74ffcec602d5\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820776 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-nova-metadata-tls-certs\") pod \"4965d23d-0756-438c-9c82-74ffcec602d5\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820802 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c869b\" (UniqueName: \"kubernetes.io/projected/305d791f-0105-4106-913f-d1d945d0b1f9-kube-api-access-c869b\") pod \"305d791f-0105-4106-913f-d1d945d0b1f9\" (UID: \"305d791f-0105-4106-913f-d1d945d0b1f9\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.820868 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4965d23d-0756-438c-9c82-74ffcec602d5-logs\") pod \"4965d23d-0756-438c-9c82-74ffcec602d5\" (UID: \"4965d23d-0756-438c-9c82-74ffcec602d5\") "
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.821574 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4965d23d-0756-438c-9c82-74ffcec602d5-logs" (OuterVolumeSpecName: "logs") pod "4965d23d-0756-438c-9c82-74ffcec602d5" (UID: "4965d23d-0756-438c-9c82-74ffcec602d5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.826244 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/305d791f-0105-4106-913f-d1d945d0b1f9-kube-api-access-c869b" (OuterVolumeSpecName: "kube-api-access-c869b") pod "305d791f-0105-4106-913f-d1d945d0b1f9" (UID: "305d791f-0105-4106-913f-d1d945d0b1f9"). InnerVolumeSpecName "kube-api-access-c869b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.826890 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4965d23d-0756-438c-9c82-74ffcec602d5-kube-api-access-mggwn" (OuterVolumeSpecName: "kube-api-access-mggwn") pod "4965d23d-0756-438c-9c82-74ffcec602d5" (UID: "4965d23d-0756-438c-9c82-74ffcec602d5"). InnerVolumeSpecName "kube-api-access-mggwn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.851060 3552 generic.go:334] "Generic (PLEG): container finished" podID="305d791f-0105-4106-913f-d1d945d0b1f9" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee" exitCode=0
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.851112 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.851118 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"305d791f-0105-4106-913f-d1d945d0b1f9","Type":"ContainerDied","Data":"fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee"}
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.851618 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"305d791f-0105-4106-913f-d1d945d0b1f9","Type":"ContainerDied","Data":"ea55787b4075184f8b451cb3d8f81aaf35da76dfb3ed1bf42e30e3e6a8fe55a8"}
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.851639 3552 scope.go:117] "RemoveContainer" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.851947 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-config-data" (OuterVolumeSpecName: "config-data") pod "305d791f-0105-4106-913f-d1d945d0b1f9" (UID: "305d791f-0105-4106-913f-d1d945d0b1f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.852752 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-config-data" (OuterVolumeSpecName: "config-data") pod "4965d23d-0756-438c-9c82-74ffcec602d5" (UID: "4965d23d-0756-438c-9c82-74ffcec602d5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.853279 3552 generic.go:334] "Generic (PLEG): container finished" podID="4965d23d-0756-438c-9c82-74ffcec602d5" containerID="939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222" exitCode=0
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.853310 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4965d23d-0756-438c-9c82-74ffcec602d5","Type":"ContainerDied","Data":"939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222"}
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.853331 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4965d23d-0756-438c-9c82-74ffcec602d5","Type":"ContainerDied","Data":"f26d67ba787c2e79486393423cd5c9b911b30478f7e4a628411e788a104b330a"}
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.853370 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.853625 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "305d791f-0105-4106-913f-d1d945d0b1f9" (UID: "305d791f-0105-4106-913f-d1d945d0b1f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.867126 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4965d23d-0756-438c-9c82-74ffcec602d5" (UID: "4965d23d-0756-438c-9c82-74ffcec602d5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.881884 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "4965d23d-0756-438c-9c82-74ffcec602d5" (UID: "4965d23d-0756-438c-9c82-74ffcec602d5"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.896618 3552 scope.go:117] "RemoveContainer" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee"
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.897103 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee\": container with ID starting with fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee not found: ID does not exist" containerID="fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.897143 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee"} err="failed to get container status \"fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee\": rpc error: code = NotFound desc = could not find container \"fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee\": container with ID starting with fec67068a5c82a36d540d6f0789214dcc79d0c0e9d375df8170ec9df624d73ee not found: ID does not exist"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.897154 3552 scope.go:117] "RemoveContainer" containerID="939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923077 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923119 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923133 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/305d791f-0105-4106-913f-d1d945d0b1f9-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923157 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mggwn\" (UniqueName: \"kubernetes.io/projected/4965d23d-0756-438c-9c82-74ffcec602d5-kube-api-access-mggwn\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923173 3552 reconciler_common.go:300] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923191 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-c869b\" (UniqueName: \"kubernetes.io/projected/305d791f-0105-4106-913f-d1d945d0b1f9-kube-api-access-c869b\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923205 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4965d23d-0756-438c-9c82-74ffcec602d5-logs\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.923217 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4965d23d-0756-438c-9c82-74ffcec602d5-config-data\") on node \"crc\" DevicePath \"\""
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.928841 3552 scope.go:117] "RemoveContainer" containerID="ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.959588 3552 scope.go:117] "RemoveContainer" containerID="939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222"
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.960362 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222\": container with ID starting with 939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222 not found: ID does not exist" containerID="939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.960432 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222"} err="failed to get container status \"939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222\": rpc error: code = NotFound desc = could not find container \"939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222\": container with ID starting with 939e1369433cc88d3bd8d2d3d4031d9e9cb8ef104455b6142c21ccfd31d0c222 not found: ID does not exist"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.960447 3552 scope.go:117] "RemoveContainer" containerID="ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57"
Mar 20 15:52:06 crc kubenswrapper[3552]: E0320 15:52:06.960960 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57\": container with ID starting with ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57 not found: ID does not exist" containerID="ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57"
Mar 20 15:52:06 crc kubenswrapper[3552]: I0320 15:52:06.961009 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57"} err="failed to get container status \"ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57\": rpc error: code = NotFound desc = could not find container \"ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57\": container with ID starting with ab480e921797257ee43869a89b068e957fd19f9a4021a09873a12f9d41d8ca57 not found: ID does not exist"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.183006 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.197821 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.207347 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.223290 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233189 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233365 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6d77cf2f-1599-4fe2-84fa-925dec6a7e26" podNamespace="openstack" podName="nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: E0320 15:52:07.233631 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerName="init"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233643 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerName="init"
Mar 20 15:52:07 crc kubenswrapper[3552]: E0320 15:52:07.233653 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-metadata"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233660 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-metadata"
Mar 20 15:52:07 crc kubenswrapper[3552]: E0320 15:52:07.233687 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="88c7817a-b4f0-4685-a348-8cf8a662632b" containerName="nova-manage"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233693 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="88c7817a-b4f0-4685-a348-8cf8a662632b" containerName="nova-manage"
Mar 20 15:52:07 crc kubenswrapper[3552]: E0320 15:52:07.233705 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerName="dnsmasq-dns"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233711 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerName="dnsmasq-dns"
Mar 20 15:52:07 crc kubenswrapper[3552]: E0320 15:52:07.233725 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" containerName="nova-scheduler-scheduler"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233732 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" containerName="nova-scheduler-scheduler"
Mar 20 15:52:07 crc kubenswrapper[3552]: E0320 15:52:07.233744 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-log"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233749 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-log"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233935 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-metadata"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233949 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5ef9583-15b5-44f2-b2cc-8b41c8d40e1d" containerName="dnsmasq-dns"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233962 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="88c7817a-b4f0-4685-a348-8cf8a662632b" containerName="nova-manage"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233975 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" containerName="nova-scheduler-scheduler"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.233986 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" containerName="nova-metadata-log"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.234629 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.240955 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.246980 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.260956 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.261174 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d3c60297-64e7-4110-a1ec-410b35338011" podNamespace="openstack" podName="nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.263175 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.265613 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.265780 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.272978 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.330151 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.330342 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-config-data\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.330382 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zczwh\" (UniqueName: \"kubernetes.io/projected/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-kube-api-access-zczwh\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.431878 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.431969 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvv5n\" (UniqueName: \"kubernetes.io/projected/d3c60297-64e7-4110-a1ec-410b35338011-kube-api-access-kvv5n\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.432086 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-config-data\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.432136 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zczwh\" (UniqueName: \"kubernetes.io/projected/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-kube-api-access-zczwh\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.432260 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-config-data\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.432371 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.432595 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3c60297-64e7-4110-a1ec-410b35338011-logs\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.432638 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.442180 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.448670 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="305d791f-0105-4106-913f-d1d945d0b1f9" path="/var/lib/kubelet/pods/305d791f-0105-4106-913f-d1d945d0b1f9/volumes"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.449207 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4965d23d-0756-438c-9c82-74ffcec602d5" path="/var/lib/kubelet/pods/4965d23d-0756-438c-9c82-74ffcec602d5/volumes"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.450168 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-config-data\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.470709 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zczwh\" (UniqueName: \"kubernetes.io/projected/6d77cf2f-1599-4fe2-84fa-925dec6a7e26-kube-api-access-zczwh\") pod \"nova-scheduler-0\" (UID: \"6d77cf2f-1599-4fe2-84fa-925dec6a7e26\") " pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.534640 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-config-data\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.534732 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.534823 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3c60297-64e7-4110-a1ec-410b35338011-logs\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.534880 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.534947 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kvv5n\" (UniqueName: \"kubernetes.io/projected/d3c60297-64e7-4110-a1ec-410b35338011-kube-api-access-kvv5n\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.535654 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3c60297-64e7-4110-a1ec-410b35338011-logs\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.539922 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.540358 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-config-data\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.553063 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3c60297-64e7-4110-a1ec-410b35338011-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.557724 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvv5n\" (UniqueName: \"kubernetes.io/projected/d3c60297-64e7-4110-a1ec-410b35338011-kube-api-access-kvv5n\") pod \"nova-metadata-0\" (UID: \"d3c60297-64e7-4110-a1ec-410b35338011\") " pod="openstack/nova-metadata-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.562618 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Mar 20 15:52:07 crc kubenswrapper[3552]: I0320 15:52:07.585834 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Mar 20 15:52:08 crc kubenswrapper[3552]: W0320 15:52:08.053558 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d77cf2f_1599_4fe2_84fa_925dec6a7e26.slice/crio-2de698e4485df215409ce6af59f018b09d0cb3743406a491f62494c6111fb0ac WatchSource:0}: Error finding container 2de698e4485df215409ce6af59f018b09d0cb3743406a491f62494c6111fb0ac: Status 404 returned error can't find the container with id 2de698e4485df215409ce6af59f018b09d0cb3743406a491f62494c6111fb0ac
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.056940 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.141914 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Mar 20 15:52:08 crc kubenswrapper[3552]: W0320 15:52:08.142963 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3c60297_64e7_4110_a1ec_410b35338011.slice/crio-d15d24437f4101fda41ce29a209b95f538b8e5c216defba67381454ecd53fa83 WatchSource:0}: Error finding container d15d24437f4101fda41ce29a209b95f538b8e5c216defba67381454ecd53fa83: Status 404 returned error can't find the container with id d15d24437f4101fda41ce29a209b95f538b8e5c216defba67381454ecd53fa83
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.737478 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.859161 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r59sm\" (UniqueName: \"kubernetes.io/projected/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-kube-api-access-r59sm\") pod \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") "
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.859229 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-config-data\") pod \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") "
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.859307 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-public-tls-certs\") pod \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") "
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.859371 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-logs\") pod \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") "
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.859452 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-combined-ca-bundle\") pod \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") "
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.859533 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-internal-tls-certs\") pod \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\" (UID: \"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35\") "
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.861057 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-logs" (OuterVolumeSpecName: "logs") pod "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" (UID: "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.875611 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-kube-api-access-r59sm" (OuterVolumeSpecName: "kube-api-access-r59sm") pod "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" (UID: "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35"). InnerVolumeSpecName "kube-api-access-r59sm".
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.884357 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3c60297-64e7-4110-a1ec-410b35338011","Type":"ContainerStarted","Data":"0943096d3e9beddc8abd916a7b144916146f1ea89df48cfed807601b94b15a95"} Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.884414 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3c60297-64e7-4110-a1ec-410b35338011","Type":"ContainerStarted","Data":"d15d24437f4101fda41ce29a209b95f538b8e5c216defba67381454ecd53fa83"} Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.886378 3552 generic.go:334] "Generic (PLEG): container finished" podID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerID="8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7" exitCode=0 Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.886458 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35","Type":"ContainerDied","Data":"8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7"} Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.886478 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a8f1d47d-5cfb-4340-b8bd-22da11a5fb35","Type":"ContainerDied","Data":"24e0a89142579c716dca89cedf903b338da1ec3869fbbee8303567a60cc3505e"} Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.886529 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.886551 3552 scope.go:117] "RemoveContainer" containerID="8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.897137 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6d77cf2f-1599-4fe2-84fa-925dec6a7e26","Type":"ContainerStarted","Data":"5728db92ec9f5611a66557657e62bf3eb81f0e7ccff9c4003f10edcdc9c9935d"} Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.897202 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6d77cf2f-1599-4fe2-84fa-925dec6a7e26","Type":"ContainerStarted","Data":"2de698e4485df215409ce6af59f018b09d0cb3743406a491f62494c6111fb0ac"} Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.899221 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-config-data" (OuterVolumeSpecName: "config-data") pod "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" (UID: "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.900986 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" (UID: "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.917934 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" (UID: "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.932875 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.932834418 podStartE2EDuration="1.932834418s" podCreationTimestamp="2026-03-20 15:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:52:08.929861603 +0000 UTC m=+1628.623558443" watchObservedRunningTime="2026-03-20 15:52:08.932834418 +0000 UTC m=+1628.626531248" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.933167 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" (UID: "a8f1d47d-5cfb-4340-b8bd-22da11a5fb35"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.942200 3552 scope.go:117] "RemoveContainer" containerID="b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.965861 3552 reconciler_common.go:300] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-public-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.965920 3552 reconciler_common.go:300] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-logs\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.965932 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.965942 3552 reconciler_common.go:300] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.965951 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-r59sm\" (UniqueName: \"kubernetes.io/projected/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-kube-api-access-r59sm\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.965962 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.978801 3552 scope.go:117] "RemoveContainer" containerID="8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7" Mar 20 15:52:08 crc kubenswrapper[3552]: E0320 15:52:08.979904 3552 remote_runtime.go:432] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7\": container with ID starting with 8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7 not found: ID does not exist" containerID="8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.980167 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7"} err="failed to get container status \"8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7\": rpc error: code = NotFound desc = could not find container \"8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7\": container with ID starting with 8492d5be7d279b5481501a8f2a94f1fe30244a03e9ccbcc4d062d368412e08a7 not found: ID does not exist" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.980186 3552 scope.go:117] "RemoveContainer" containerID="b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2" Mar 20 15:52:08 crc kubenswrapper[3552]: E0320 15:52:08.980765 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2\": container with ID starting with b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2 not found: ID does not exist" containerID="b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2" Mar 20 15:52:08 crc kubenswrapper[3552]: I0320 15:52:08.980824 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2"} err="failed to get container status \"b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2\": rpc error: code = NotFound desc = could not find container \"b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2\": container with ID starting with b2ba2a13b4f089f59fe73cb070bd5af5ec128b66686a0f8e68a0d7c4df3f75d2 not found: ID does not exist" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.284480 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.298092 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.307933 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.308208 3552 topology_manager.go:215] "Topology Admit Handler" podUID="82d5b176-ae10-446e-8994-39e52dd4611d" podNamespace="openstack" podName="nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: E0320 15:52:09.308637 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-log" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.308664 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-log" Mar 20 15:52:09 crc kubenswrapper[3552]: E0320 15:52:09.308685 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-api" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.308694 3552 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-api" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.310076 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-api" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.310113 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" containerName="nova-api-log" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.311292 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.314387 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.316690 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.316931 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.320025 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.444951 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8f1d47d-5cfb-4340-b8bd-22da11a5fb35" path="/var/lib/kubelet/pods/a8f1d47d-5cfb-4340-b8bd-22da11a5fb35/volumes" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.477810 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.477855 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-config-data\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.477880 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.477928 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdwfj\" (UniqueName: \"kubernetes.io/projected/82d5b176-ae10-446e-8994-39e52dd4611d-kube-api-access-gdwfj\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.477981 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82d5b176-ae10-446e-8994-39e52dd4611d-logs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.478034 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-public-tls-certs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.579558 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82d5b176-ae10-446e-8994-39e52dd4611d-logs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.579671 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-public-tls-certs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.579735 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.579764 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-config-data\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.579796 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.579863 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gdwfj\" (UniqueName: \"kubernetes.io/projected/82d5b176-ae10-446e-8994-39e52dd4611d-kube-api-access-gdwfj\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.580637 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82d5b176-ae10-446e-8994-39e52dd4611d-logs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.585094 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.585355 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-config-data\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.587123 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.588890 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/82d5b176-ae10-446e-8994-39e52dd4611d-public-tls-certs\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.601226 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdwfj\" (UniqueName: \"kubernetes.io/projected/82d5b176-ae10-446e-8994-39e52dd4611d-kube-api-access-gdwfj\") pod \"nova-api-0\" (UID: \"82d5b176-ae10-446e-8994-39e52dd4611d\") " pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.747984 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.914422 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3c60297-64e7-4110-a1ec-410b35338011","Type":"ContainerStarted","Data":"9f978b0f885af86078cd245aa134278489f8157fdea86393cd7823c48bf7cc55"} Mar 20 15:52:09 crc kubenswrapper[3552]: I0320 15:52:09.934828 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.934725546 podStartE2EDuration="2.934725546s" podCreationTimestamp="2026-03-20 15:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:52:09.931606847 +0000 UTC m=+1629.625303687" watchObservedRunningTime="2026-03-20 15:52:09.934725546 +0000 UTC m=+1629.628422386" Mar 20 15:52:10 crc kubenswrapper[3552]: I0320 15:52:10.227138 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Mar 20 15:52:10 crc kubenswrapper[3552]: W0320 15:52:10.229942 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82d5b176_ae10_446e_8994_39e52dd4611d.slice/crio-541661f9fdeecd1e7e232c066306410e0772f0e78dff89b45d5b164435ca4730 WatchSource:0}: Error finding container 541661f9fdeecd1e7e232c066306410e0772f0e78dff89b45d5b164435ca4730: Status 404 returned error can't find the container with id 541661f9fdeecd1e7e232c066306410e0772f0e78dff89b45d5b164435ca4730 Mar 20 15:52:10 crc kubenswrapper[3552]: I0320 15:52:10.936877 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82d5b176-ae10-446e-8994-39e52dd4611d","Type":"ContainerStarted","Data":"dcfdd7782cdce060f21902eae4371fb33c85743e33fb4f90c2411bf3888fc586"} Mar 20 15:52:10 crc kubenswrapper[3552]: I0320 15:52:10.937236 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82d5b176-ae10-446e-8994-39e52dd4611d","Type":"ContainerStarted","Data":"b2be233dcc54daa58ac1ebeda8fae65f8d1a7c32cb06f452769279d8b34ee1df"} Mar 20 15:52:10 crc kubenswrapper[3552]: I0320 15:52:10.937266 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82d5b176-ae10-446e-8994-39e52dd4611d","Type":"ContainerStarted","Data":"541661f9fdeecd1e7e232c066306410e0772f0e78dff89b45d5b164435ca4730"} Mar 20 15:52:12 crc 
kubenswrapper[3552]: I0320 15:52:12.562784 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Mar 20 15:52:12 crc kubenswrapper[3552]: I0320 15:52:12.779113 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 15:52:12 crc kubenswrapper[3552]: I0320 15:52:12.779222 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 15:52:17 crc kubenswrapper[3552]: I0320 15:52:17.562874 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Mar 20 15:52:17 crc kubenswrapper[3552]: I0320 15:52:17.587237 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Mar 20 15:52:17 crc kubenswrapper[3552]: I0320 15:52:17.587287 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Mar 20 15:52:17 crc kubenswrapper[3552]: I0320 15:52:17.600951 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Mar 20 15:52:17 crc kubenswrapper[3552]: I0320 15:52:17.621235 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=8.621199801 podStartE2EDuration="8.621199801s" podCreationTimestamp="2026-03-20 15:52:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:52:10.966923673 +0000 UTC m=+1630.660620573" watchObservedRunningTime="2026-03-20 15:52:17.621199801 +0000 UTC m=+1637.314896631"
Mar 20 15:52:18 crc kubenswrapper[3552]: I0320 15:52:18.068220 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Mar 20 15:52:18 crc kubenswrapper[3552]: I0320 15:52:18.600654 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d3c60297-64e7-4110-a1ec-410b35338011" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.231:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:52:18 crc kubenswrapper[3552]: I0320 15:52:18.600688 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d3c60297-64e7-4110-a1ec-410b35338011" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.231:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Mar 20 15:52:19 crc kubenswrapper[3552]: I0320 15:52:19.749068 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Mar 20 15:52:19 crc kubenswrapper[3552]: I0320 15:52:19.749323 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
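
The Startup probe failures above and just below quote the exact error Go's HTTP client returns when a request times out before response headers arrive, i.e. the freshly started nova containers are simply not answering yet; once they respond, the probes flip to "started" and readiness then reports "ready" in the entries that follow. A minimal probe-style check under those assumptions (the URL is taken from the log; the one-second timeout and the skipped TLS verification are illustrative guesses, not these pods' actual probe spec):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeOnce issues a single GET the way an HTTP startup probe would.
// On timeout the returned error ends with "(Client.Timeout exceeded
// while awaiting headers)", the exact text quoted in the log.
func probeOnce(url string) error {
	client := &http.Client{
		Timeout: 1 * time.Second, // illustrative; real probes set their own timeoutSeconds
		Transport: &http.Transport{
			// Assumes self-signed service certs, hence no verification.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeOnce("https://10.217.0.232:8774/"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
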
podUID="82d5b176-ae10-446e-8994-39e52dd4611d" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.232:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Mar 20 15:52:20 crc kubenswrapper[3552]: I0320 15:52:20.761885 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="82d5b176-ae10-446e-8994-39e52dd4611d" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.232:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Mar 20 15:52:22 crc kubenswrapper[3552]: I0320 15:52:22.149421 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Mar 20 15:52:25 crc kubenswrapper[3552]: I0320 15:52:25.586507 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Mar 20 15:52:25 crc kubenswrapper[3552]: I0320 15:52:25.586983 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Mar 20 15:52:27 crc kubenswrapper[3552]: I0320 15:52:27.609071 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Mar 20 15:52:27 crc kubenswrapper[3552]: I0320 15:52:27.612222 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Mar 20 15:52:27 crc kubenswrapper[3552]: I0320 15:52:27.615712 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Mar 20 15:52:27 crc kubenswrapper[3552]: I0320 15:52:27.749169 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Mar 20 15:52:27 crc kubenswrapper[3552]: I0320 15:52:27.749214 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Mar 20 15:52:28 crc kubenswrapper[3552]: I0320 15:52:28.108757 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Mar 20 15:52:29 crc kubenswrapper[3552]: I0320 15:52:29.757819 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Mar 20 15:52:29 crc kubenswrapper[3552]: I0320 15:52:29.759469 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Mar 20 15:52:29 crc kubenswrapper[3552]: I0320 15:52:29.765169 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Mar 20 15:52:29 crc kubenswrapper[3552]: I0320 15:52:29.770388 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Mar 20 15:52:37 crc kubenswrapper[3552]: I0320 15:52:37.823351 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:52:39 crc kubenswrapper[3552]: I0320 15:52:39.310269 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:52:42 crc kubenswrapper[3552]: I0320 15:52:42.386419 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerName="rabbitmq" containerID="cri-o://1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0" gracePeriod=604796 Mar 20 15:52:42 crc kubenswrapper[3552]: I0320 15:52:42.778501 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 15:52:42 crc kubenswrapper[3552]: I0320 15:52:42.780935 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 15:52:43 crc kubenswrapper[3552]: I0320 15:52:43.200174 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerName="rabbitmq" containerID="cri-o://a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e" gracePeriod=604797 Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.241290 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.269881 3552 generic.go:334] "Generic (PLEG): container finished" podID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerID="1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0" exitCode=0 Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.269918 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.269920 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f1e0de34-e3ac-4691-94c4-d5ac03353099","Type":"ContainerDied","Data":"1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0"} Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.269950 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f1e0de34-e3ac-4691-94c4-d5ac03353099","Type":"ContainerDied","Data":"79bb3a0438dea1fca0ed0c4c4c959e46f47c58f65ba8d5374cfa30395b26a4a4"} Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.269979 3552 scope.go:117] "RemoveContainer" containerID="1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.303322 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.303655 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f1e0de34-e3ac-4691-94c4-d5ac03353099-erlang-cookie-secret\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.303708 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-tls\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.303889 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-plugins\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304136 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-erlang-cookie\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304204 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-plugins-conf\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304236 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8ttb\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-kube-api-access-f8ttb\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304298 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-server-conf\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304332 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f1e0de34-e3ac-4691-94c4-d5ac03353099-pod-info\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304357 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-confd\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.304453 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-config-data\") pod \"f1e0de34-e3ac-4691-94c4-d5ac03353099\" (UID: \"f1e0de34-e3ac-4691-94c4-d5ac03353099\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.314043 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.314148 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.314674 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.320302 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e0de34-e3ac-4691-94c4-d5ac03353099-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.324865 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "persistence") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.329281 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f1e0de34-e3ac-4691-94c4-d5ac03353099-pod-info" (OuterVolumeSpecName: "pod-info") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.343620 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.345094 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-kube-api-access-f8ttb" (OuterVolumeSpecName: "kube-api-access-f8ttb") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "kube-api-access-f8ttb". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.371661 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-config-data" (OuterVolumeSpecName: "config-data") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.405996 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-server-conf" (OuterVolumeSpecName: "server-conf") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409646 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409681 3552 reconciler_common.go:300] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f1e0de34-e3ac-4691-94c4-d5ac03353099-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409698 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409712 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409726 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409739 3552 reconciler_common.go:300] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-plugins-conf\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409753 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-f8ttb\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-kube-api-access-f8ttb\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409766 3552 reconciler_common.go:300] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-server-conf\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409779 3552 reconciler_common.go:300] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f1e0de34-e3ac-4691-94c4-d5ac03353099-pod-info\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.409793 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1e0de34-e3ac-4691-94c4-d5ac03353099-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.442515 3552 scope.go:117] "RemoveContainer" containerID="d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.452507 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.512352 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.568709 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f1e0de34-e3ac-4691-94c4-d5ac03353099" (UID: "f1e0de34-e3ac-4691-94c4-d5ac03353099"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.607605 3552 scope.go:117] "RemoveContainer" containerID="1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0" Mar 20 15:52:49 crc kubenswrapper[3552]: E0320 15:52:49.608909 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0\": container with ID starting with 1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0 not found: ID does not exist" containerID="1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.608965 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0"} err="failed to get container status \"1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0\": rpc error: code = NotFound desc = could not find container \"1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0\": container with ID starting with 1d20e491e0534b247f004013159f7439a4bf319117b637ecdf58e115a794e9d0 not found: ID does not exist" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.608980 3552 scope.go:117] "RemoveContainer" containerID="d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021" Mar 20 15:52:49 crc kubenswrapper[3552]: E0320 15:52:49.610778 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021\": container with ID starting with d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021 not found: ID does not exist" containerID="d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.610823 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021"} err="failed to get container status \"d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021\": rpc error: code = NotFound desc = could not find container \"d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021\": container with ID starting with d33595f641fa2f9f5060582995aaff7fc360eeb8234c976dedc61f874cbac021 not found: ID does not exist" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.615563 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f1e0de34-e3ac-4691-94c4-d5ac03353099-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.634591 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.651953 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.670814 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:52:49 crc 
kubenswrapper[3552]: I0320 15:52:49.671003 3552 topology_manager.go:215] "Topology Admit Handler" podUID="58197235-68b0-45dd-9df3-6825c76c4df8" podNamespace="openstack" podName="rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: E0320 15:52:49.672396 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerName="setup-container" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.672443 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerName="setup-container" Mar 20 15:52:49 crc kubenswrapper[3552]: E0320 15:52:49.672485 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerName="rabbitmq" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.672492 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerName="rabbitmq" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.672691 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" containerName="rabbitmq" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.673778 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.678410 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.678698 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.678736 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wt2xv" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.678743 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.678765 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.678976 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.679022 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.688729 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.821748 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58197235-68b0-45dd-9df3-6825c76c4df8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.821822 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822035 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-config-data\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822099 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822155 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58197235-68b0-45dd-9df3-6825c76c4df8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822266 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822289 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822332 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822444 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822495 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.822558 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njjhz\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-kube-api-access-njjhz\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.839685 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.924243 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-plugins-conf\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.924304 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/90a6e0ae-40a5-47b1-8495-26b369c628c4-pod-info\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.924374 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.924464 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-plugins\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.924597 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/90a6e0ae-40a5-47b1-8495-26b369c628c4-erlang-cookie-secret\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.926604 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.927071 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.928785 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-confd\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.928920 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-erlang-cookie\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.928959 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-config-data\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.928996 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-tls\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.929032 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bq5mc\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-kube-api-access-bq5mc\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.929123 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-server-conf\") pod \"90a6e0ae-40a5-47b1-8495-26b369c628c4\" (UID: \"90a6e0ae-40a5-47b1-8495-26b369c628c4\") " Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.932074 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.932366 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.932468 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.932608 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933098 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933182 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-njjhz\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-kube-api-access-njjhz\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933362 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58197235-68b0-45dd-9df3-6825c76c4df8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933425 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933522 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-config-data\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933557 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933791 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90a6e0ae-40a5-47b1-8495-26b369c628c4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.933892 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58197235-68b0-45dd-9df3-6825c76c4df8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.934159 3552 reconciler_common.go:300] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/90a6e0ae-40a5-47b1-8495-26b369c628c4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.934175 3552 reconciler_common.go:300] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-plugins-conf\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.934187 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.935000 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/90a6e0ae-40a5-47b1-8495-26b369c628c4-pod-info" (OuterVolumeSpecName: "pod-info") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.935387 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.936184 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.940095 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.941834 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.942137 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.942691 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.942933 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.943055 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58197235-68b0-45dd-9df3-6825c76c4df8-config-data\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.946228 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.954997 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58197235-68b0-45dd-9df3-6825c76c4df8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.956526 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.958636 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58197235-68b0-45dd-9df3-6825c76c4df8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.962009 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.963549 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-njjhz\" (UniqueName: \"kubernetes.io/projected/58197235-68b0-45dd-9df3-6825c76c4df8-kube-api-access-njjhz\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:49 crc kubenswrapper[3552]: I0320 15:52:49.965551 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-kube-api-access-bq5mc" (OuterVolumeSpecName: "kube-api-access-bq5mc") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "kube-api-access-bq5mc". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.008101 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-config-data" (OuterVolumeSpecName: "config-data") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.027858 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"58197235-68b0-45dd-9df3-6825c76c4df8\") " pod="openstack/rabbitmq-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.036955 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.037001 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.037014 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.036969 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-server-conf" (OuterVolumeSpecName: "server-conf") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.037030 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-bq5mc\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-kube-api-access-bq5mc\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.039109 3552 reconciler_common.go:300] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/90a6e0ae-40a5-47b1-8495-26b369c628c4-pod-info\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.039141 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.109511 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.141787 3552 reconciler_common.go:300] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/90a6e0ae-40a5-47b1-8495-26b369c628c4-server-conf\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.141862 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.146968 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "90a6e0ae-40a5-47b1-8495-26b369c628c4" (UID: "90a6e0ae-40a5-47b1-8495-26b369c628c4"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.243360 3552 reconciler_common.go:300] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/90a6e0ae-40a5-47b1-8495-26b369c628c4-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.282275 3552 generic.go:334] "Generic (PLEG): container finished" podID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerID="a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e" exitCode=0 Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.282349 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"90a6e0ae-40a5-47b1-8495-26b369c628c4","Type":"ContainerDied","Data":"a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e"} Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.282357 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.282370 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"90a6e0ae-40a5-47b1-8495-26b369c628c4","Type":"ContainerDied","Data":"67923631f2dd5e17336701dc732e95bab6d2b84c0567435937e8969f640a0801"} Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.282387 3552 scope.go:117] "RemoveContainer" containerID="a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.299155 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.351206 3552 scope.go:117] "RemoveContainer" containerID="51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.364084 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.377493 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.412232 3552 scope.go:117] "RemoveContainer" containerID="a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e" Mar 20 15:52:50 crc kubenswrapper[3552]: E0320 15:52:50.414758 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e\": container with ID starting with a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e not found: ID does not exist" containerID="a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.414825 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e"} err="failed to get container status \"a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e\": rpc error: code = NotFound desc = could not find container \"a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e\": container with ID starting with a21bef6d7a315a659400cc4c3ce20916f848acd29e14d4a79805d0ca0247881e not found: ID does not exist" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.414843 3552 
scope.go:117] "RemoveContainer" containerID="51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6" Mar 20 15:52:50 crc kubenswrapper[3552]: E0320 15:52:50.419139 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6\": container with ID starting with 51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6 not found: ID does not exist" containerID="51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.419195 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6"} err="failed to get container status \"51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6\": rpc error: code = NotFound desc = could not find container \"51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6\": container with ID starting with 51ca7b51c7ddb9c0b1bcc6973694044a30de02f438cd027b9c73c7c2570797e6 not found: ID does not exist" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.421494 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.421703 3552 topology_manager.go:215] "Topology Admit Handler" podUID="f71c29cd-5055-41bb-b3f8-6183a9be2b7f" podNamespace="openstack" podName="rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: E0320 15:52:50.422047 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerName="rabbitmq" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.422064 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerName="rabbitmq" Mar 20 15:52:50 crc kubenswrapper[3552]: E0320 15:52:50.422098 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerName="setup-container" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.422106 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerName="setup-container" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.422300 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" containerName="rabbitmq" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.423323 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.428968 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kr2lk" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.429177 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.429320 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.429465 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.429604 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.429698 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.429797 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.436995 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.550503 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.550803 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.550844 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551145 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551225 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551458 3552 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551502 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551524 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551559 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551589 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkjsn\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-kube-api-access-bkjsn\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.551653 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653358 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653437 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653468 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653501 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653548 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653572 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653610 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653635 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653653 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653683 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.653710 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-bkjsn\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-kube-api-access-bkjsn\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.654725 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.655005 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.655470 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.656244 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.658566 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.658839 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.660044 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.660463 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.660847 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.663384 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.676691 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkjsn\" (UniqueName: \"kubernetes.io/projected/f71c29cd-5055-41bb-b3f8-6183a9be2b7f-kube-api-access-bkjsn\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.692140 3552 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f71c29cd-5055-41bb-b3f8-6183a9be2b7f\") " pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.757237 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Mar 20 15:52:50 crc kubenswrapper[3552]: I0320 15:52:50.933215 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Mar 20 15:52:51 crc kubenswrapper[3552]: I0320 15:52:51.098872 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Mar 20 15:52:51 crc kubenswrapper[3552]: W0320 15:52:51.107819 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf71c29cd_5055_41bb_b3f8_6183a9be2b7f.slice/crio-f489e5e634846cbb29ea3831a44774dce5876b3a18c4c0c9255d99d546a38831 WatchSource:0}: Error finding container f489e5e634846cbb29ea3831a44774dce5876b3a18c4c0c9255d99d546a38831: Status 404 returned error can't find the container with id f489e5e634846cbb29ea3831a44774dce5876b3a18c4c0c9255d99d546a38831 Mar 20 15:52:51 crc kubenswrapper[3552]: I0320 15:52:51.294343 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f71c29cd-5055-41bb-b3f8-6183a9be2b7f","Type":"ContainerStarted","Data":"f489e5e634846cbb29ea3831a44774dce5876b3a18c4c0c9255d99d546a38831"} Mar 20 15:52:51 crc kubenswrapper[3552]: I0320 15:52:51.296049 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58197235-68b0-45dd-9df3-6825c76c4df8","Type":"ContainerStarted","Data":"b5159049d3a270777d5b6ef5fd119c50b92920c9c4204868643bf7543df70098"} Mar 20 15:52:51 crc kubenswrapper[3552]: I0320 15:52:51.439990 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90a6e0ae-40a5-47b1-8495-26b369c628c4" path="/var/lib/kubelet/pods/90a6e0ae-40a5-47b1-8495-26b369c628c4/volumes" Mar 20 15:52:51 crc kubenswrapper[3552]: I0320 15:52:51.442468 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1e0de34-e3ac-4691-94c4-d5ac03353099" path="/var/lib/kubelet/pods/f1e0de34-e3ac-4691-94c4-d5ac03353099/volumes" Mar 20 15:52:54 crc kubenswrapper[3552]: I0320 15:52:54.333521 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58197235-68b0-45dd-9df3-6825c76c4df8","Type":"ContainerStarted","Data":"a85302128f82babdc16853aa64e420893bd404eeb7b3e4dd1981557a6570ca6e"} Mar 20 15:52:54 crc kubenswrapper[3552]: I0320 15:52:54.338092 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f71c29cd-5055-41bb-b3f8-6183a9be2b7f","Type":"ContainerStarted","Data":"a163d68e1c4e18d6dc2413245b167a1b1f5106a8dfe13cd79fcbfc8e9afc95f4"} Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.233355 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d857c9dbc-t8lz7"] Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.233873 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" podNamespace="openstack" podName="dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.235239 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.251989 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.252164 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d857c9dbc-t8lz7"] Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355242 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-nb\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355322 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-swift-storage-0\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355368 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-sb\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355484 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbk64\" (UniqueName: \"kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355550 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-openstack-edpm-ipam\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355573 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-config\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.355599 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-svc\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.457276 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-sb\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: 
\"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.457818 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mbk64\" (UniqueName: \"kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.457941 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-openstack-edpm-ipam\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.457980 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-config\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.458034 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-svc\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.458074 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-nb\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.458125 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-swift-storage-0\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.458373 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-sb\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.459943 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-openstack-edpm-ipam\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.460547 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-swift-storage-0\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " 
pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.460847 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-nb\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.461049 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-config\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.462115 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-svc\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.486237 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbk64\" (UniqueName: \"kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64\") pod \"dnsmasq-dns-d857c9dbc-t8lz7\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") " pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:55 crc kubenswrapper[3552]: I0320 15:52:55.552811 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:52:56 crc kubenswrapper[3552]: I0320 15:52:56.035118 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d857c9dbc-t8lz7"] Mar 20 15:52:56 crc kubenswrapper[3552]: I0320 15:52:56.356218 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" event={"ID":"0f0e3469-698d-424b-b4de-142a61f90b11","Type":"ContainerStarted","Data":"9e03c9ffcd9fd77f72c3575430acb11f3ca9b3829e6c83646f9ac08450e509f7"} Mar 20 15:52:57 crc kubenswrapper[3552]: I0320 15:52:57.365525 3552 generic.go:334] "Generic (PLEG): container finished" podID="0f0e3469-698d-424b-b4de-142a61f90b11" containerID="fc7ef613e5c54c79c76c860042bbb85c17232e2259df2fe36b717b5f8c33eef7" exitCode=0 Mar 20 15:52:57 crc kubenswrapper[3552]: I0320 15:52:57.365579 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" event={"ID":"0f0e3469-698d-424b-b4de-142a61f90b11","Type":"ContainerDied","Data":"fc7ef613e5c54c79c76c860042bbb85c17232e2259df2fe36b717b5f8c33eef7"} Mar 20 15:52:58 crc kubenswrapper[3552]: I0320 15:52:58.377938 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" event={"ID":"0f0e3469-698d-424b-b4de-142a61f90b11","Type":"ContainerStarted","Data":"ac2a876e5126e33f5c59e7aff8f8769bc2d52e13ea353807ecd7442604348263"} Mar 20 15:52:58 crc kubenswrapper[3552]: I0320 15:52:58.400960 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" podStartSLOduration=3.40090555 podStartE2EDuration="3.40090555s" podCreationTimestamp="2026-03-20 15:52:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:52:58.398487136 +0000 UTC 
m=+1678.092183986" watchObservedRunningTime="2026-03-20 15:52:58.40090555 +0000 UTC m=+1678.094602380" Mar 20 15:52:59 crc kubenswrapper[3552]: I0320 15:52:59.395022 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:53:01 crc kubenswrapper[3552]: I0320 15:53:01.320860 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:53:01 crc kubenswrapper[3552]: I0320 15:53:01.321213 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:53:01 crc kubenswrapper[3552]: I0320 15:53:01.321243 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:53:01 crc kubenswrapper[3552]: I0320 15:53:01.321275 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:53:01 crc kubenswrapper[3552]: I0320 15:53:01.321325 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.554643 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.617523 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fc4f48d55-bzfz6"] Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.617778 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" podUID="39f1f950-065d-4899-a254-363f2279cde1" containerName="dnsmasq-dns" containerID="cri-o://02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9" gracePeriod=10 Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.773656 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68599879df-x6nqg"] Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.774083 3552 topology_manager.go:215] "Topology Admit Handler" podUID="95aeed66-e4e9-42ec-8bf3-88b0a6947263" podNamespace="openstack" podName="dnsmasq-dns-68599879df-x6nqg" Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.775552 3552 util.go:30] "No sandbox for pod can be found. 
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.802919 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68599879df-x6nqg"]
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.972942 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-ovsdbserver-sb\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.973134 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-dns-svc\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.973217 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-ovsdbserver-nb\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.973319 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs9gd\" (UniqueName: \"kubernetes.io/projected/95aeed66-e4e9-42ec-8bf3-88b0a6947263-kube-api-access-qs9gd\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.973470 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-openstack-edpm-ipam\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.973913 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-config\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:05 crc kubenswrapper[3552]: I0320 15:53:05.973972 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-dns-swift-storage-0\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.076060 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-config\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.076118 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-dns-swift-storage-0\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.076323 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-ovsdbserver-sb\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.076491 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-dns-svc\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.076613 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-ovsdbserver-nb\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077113 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-dns-swift-storage-0\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077186 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-config\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077251 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-ovsdbserver-sb\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077484 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-ovsdbserver-nb\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077484 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-dns-svc\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077593 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-qs9gd\" (UniqueName: \"kubernetes.io/projected/95aeed66-e4e9-42ec-8bf3-88b0a6947263-kube-api-access-qs9gd\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.077787 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-openstack-edpm-ipam\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.078591 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/95aeed66-e4e9-42ec-8bf3-88b0a6947263-openstack-edpm-ipam\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.096755 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs9gd\" (UniqueName: \"kubernetes.io/projected/95aeed66-e4e9-42ec-8bf3-88b0a6947263-kube-api-access-qs9gd\") pod \"dnsmasq-dns-68599879df-x6nqg\" (UID: \"95aeed66-e4e9-42ec-8bf3-88b0a6947263\") " pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.103532 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.193804 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.383833 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfc8k\" (UniqueName: \"kubernetes.io/projected/39f1f950-065d-4899-a254-363f2279cde1-kube-api-access-hfc8k\") pod \"39f1f950-065d-4899-a254-363f2279cde1\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") "
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.383888 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-nb\") pod \"39f1f950-065d-4899-a254-363f2279cde1\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") "
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.383927 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-config\") pod \"39f1f950-065d-4899-a254-363f2279cde1\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") "
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.383984 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-swift-storage-0\") pod \"39f1f950-065d-4899-a254-363f2279cde1\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") "
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.384015 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-svc\") pod \"39f1f950-065d-4899-a254-363f2279cde1\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") "
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.384131 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-sb\") pod \"39f1f950-065d-4899-a254-363f2279cde1\" (UID: \"39f1f950-065d-4899-a254-363f2279cde1\") "
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.412227 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39f1f950-065d-4899-a254-363f2279cde1-kube-api-access-hfc8k" (OuterVolumeSpecName: "kube-api-access-hfc8k") pod "39f1f950-065d-4899-a254-363f2279cde1" (UID: "39f1f950-065d-4899-a254-363f2279cde1"). InnerVolumeSpecName "kube-api-access-hfc8k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.439447 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "39f1f950-065d-4899-a254-363f2279cde1" (UID: "39f1f950-065d-4899-a254-363f2279cde1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.450660 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "39f1f950-065d-4899-a254-363f2279cde1" (UID: "39f1f950-065d-4899-a254-363f2279cde1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.470899 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "39f1f950-065d-4899-a254-363f2279cde1" (UID: "39f1f950-065d-4899-a254-363f2279cde1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.472737 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "39f1f950-065d-4899-a254-363f2279cde1" (UID: "39f1f950-065d-4899-a254-363f2279cde1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.477490 3552 generic.go:334] "Generic (PLEG): container finished" podID="39f1f950-065d-4899-a254-363f2279cde1" containerID="02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9" exitCode=0
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.477531 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" event={"ID":"39f1f950-065d-4899-a254-363f2279cde1","Type":"ContainerDied","Data":"02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9"}
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.477554 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6" event={"ID":"39f1f950-065d-4899-a254-363f2279cde1","Type":"ContainerDied","Data":"14e30c3577ce27a3b5705457ac20b6103fa847793b740a96d3954a084f1c39e1"}
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.477562 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fc4f48d55-bzfz6"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.477572 3552 scope.go:117] "RemoveContainer" containerID="02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.484091 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-config" (OuterVolumeSpecName: "config") pod "39f1f950-065d-4899-a254-363f2279cde1" (UID: "39f1f950-065d-4899-a254-363f2279cde1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.486806 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hfc8k\" (UniqueName: \"kubernetes.io/projected/39f1f950-065d-4899-a254-363f2279cde1-kube-api-access-hfc8k\") on node \"crc\" DevicePath \"\""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.486964 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.489148 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-config\") on node \"crc\" DevicePath \"\""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.489173 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.489185 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-dns-svc\") on node \"crc\" DevicePath \"\""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.489195 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/39f1f950-065d-4899-a254-363f2279cde1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.544976 3552 scope.go:117] "RemoveContainer" containerID="db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.596631 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68599879df-x6nqg"]
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.599729 3552 scope.go:117] "RemoveContainer" containerID="02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9"
Mar 20 15:53:06 crc kubenswrapper[3552]: E0320 15:53:06.600211 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9\": container with ID starting with 02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9 not found: ID does not exist" containerID="02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.600255 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9"} err="failed to get container status \"02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9\": rpc error: code = NotFound desc = could not find container \"02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9\": container with ID starting with 02c1672a71b787c294b5d0765f92f86d6f5f9232389d7ca74ae45197ece296d9 not found: ID does not exist"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.600265 3552 scope.go:117] "RemoveContainer" containerID="db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee"
Mar 20 15:53:06 crc kubenswrapper[3552]: E0320 15:53:06.600708 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee\": container with ID starting with db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee not found: ID does not exist" containerID="db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee"
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.600741 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee"} err="failed to get container status \"db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee\": rpc error: code = NotFound desc = could not find container \"db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee\": container with ID starting with db6febaf28d9e854186faa9ea8c47fa5e5f35b9e882a08dd62fd94d785d70fee not found: ID does not exist"
Mar 20 15:53:06 crc kubenswrapper[3552]: W0320 15:53:06.606732 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95aeed66_e4e9_42ec_8bf3_88b0a6947263.slice/crio-5c81245c3af7f5c58995983773e712509ff39ccfb73610feb9210d9f97f35285 WatchSource:0}: Error finding container 5c81245c3af7f5c58995983773e712509ff39ccfb73610feb9210d9f97f35285: Status 404 returned error can't find the container with id 5c81245c3af7f5c58995983773e712509ff39ccfb73610feb9210d9f97f35285
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.822240 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fc4f48d55-bzfz6"]
Mar 20 15:53:06 crc kubenswrapper[3552]: I0320 15:53:06.877303 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fc4f48d55-bzfz6"]
Mar 20 15:53:07 crc kubenswrapper[3552]: I0320 15:53:07.442088 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39f1f950-065d-4899-a254-363f2279cde1" path="/var/lib/kubelet/pods/39f1f950-065d-4899-a254-363f2279cde1/volumes"
Mar 20 15:53:07 crc kubenswrapper[3552]: I0320 15:53:07.488510 3552 generic.go:334] "Generic (PLEG): container finished" podID="95aeed66-e4e9-42ec-8bf3-88b0a6947263" containerID="a5854c50e337221a10685321c98881e056a20bf479f6b5e19be0bead303352ae" exitCode=0
Mar 20 15:53:07 crc kubenswrapper[3552]: I0320 15:53:07.488579 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68599879df-x6nqg" event={"ID":"95aeed66-e4e9-42ec-8bf3-88b0a6947263","Type":"ContainerDied","Data":"a5854c50e337221a10685321c98881e056a20bf479f6b5e19be0bead303352ae"}
Mar 20 15:53:07 crc kubenswrapper[3552]: I0320 15:53:07.488606 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68599879df-x6nqg" event={"ID":"95aeed66-e4e9-42ec-8bf3-88b0a6947263","Type":"ContainerStarted","Data":"5c81245c3af7f5c58995983773e712509ff39ccfb73610feb9210d9f97f35285"}
Mar 20 15:53:09 crc kubenswrapper[3552]: I0320 15:53:09.508831 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68599879df-x6nqg" event={"ID":"95aeed66-e4e9-42ec-8bf3-88b0a6947263","Type":"ContainerStarted","Data":"8fa602efa27c9e3222c701a05e45a9dc989adfe937eeae2dc414fdb6124cccc2"}
Mar 20 15:53:09 crc kubenswrapper[3552]: I0320 15:53:09.529802 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68599879df-x6nqg" podStartSLOduration=4.529754912 podStartE2EDuration="4.529754912s" podCreationTimestamp="2026-03-20 15:53:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:53:09.528717904 +0000 UTC m=+1689.222414754" watchObservedRunningTime="2026-03-20 15:53:09.529754912 +0000 UTC m=+1689.223451742"
Mar 20 15:53:11 crc kubenswrapper[3552]: I0320 15:53:11.105071 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:12 crc kubenswrapper[3552]: I0320 15:53:12.779186 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 15:53:12 crc kubenswrapper[3552]: I0320 15:53:12.779716 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 15:53:12 crc kubenswrapper[3552]: I0320 15:53:12.779761 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 15:53:12 crc kubenswrapper[3552]: I0320 15:53:12.780756 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Mar 20 15:53:12 crc kubenswrapper[3552]: I0320 15:53:12.780925 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" gracePeriod=600
Mar 20 15:53:12 crc kubenswrapper[3552]: E0320 15:53:12.924421 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 15:53:13 crc kubenswrapper[3552]: I0320 15:53:13.557830 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" exitCode=0
Mar 20 15:53:13 crc kubenswrapper[3552]: I0320 15:53:13.557877 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"}
Mar 20 15:53:13 crc kubenswrapper[3552]: I0320 15:53:13.557907 3552 scope.go:117] "RemoveContainer" containerID="3e2f09fb5251918a00eedeadde2f6289e4a42a00c71de21ae5afa976b5070f51"
Mar 20 15:53:13 crc kubenswrapper[3552]: I0320 15:53:13.558852 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"
Mar 20 15:53:13 crc kubenswrapper[3552]: E0320 15:53:13.559612 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.105668 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68599879df-x6nqg"
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.183342 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d857c9dbc-t8lz7"]
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.183609 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" containerName="dnsmasq-dns" containerID="cri-o://ac2a876e5126e33f5c59e7aff8f8769bc2d52e13ea353807ecd7442604348263" gracePeriod=10
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.609266 3552 generic.go:334] "Generic (PLEG): container finished" podID="0f0e3469-698d-424b-b4de-142a61f90b11" containerID="ac2a876e5126e33f5c59e7aff8f8769bc2d52e13ea353807ecd7442604348263" exitCode=0
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.609558 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" event={"ID":"0f0e3469-698d-424b-b4de-142a61f90b11","Type":"ContainerDied","Data":"ac2a876e5126e33f5c59e7aff8f8769bc2d52e13ea353807ecd7442604348263"}
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.609599 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7" event={"ID":"0f0e3469-698d-424b-b4de-142a61f90b11","Type":"ContainerDied","Data":"9e03c9ffcd9fd77f72c3575430acb11f3ca9b3829e6c83646f9ac08450e509f7"}
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.609613 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e03c9ffcd9fd77f72c3575430acb11f3ca9b3829e6c83646f9ac08450e509f7"
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.638282 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d857c9dbc-t8lz7"
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.719644 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-sb\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.719706 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-swift-storage-0\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.719763 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-openstack-edpm-ipam\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.719832 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-svc\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.719892 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-nb\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.719981 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbk64\" (UniqueName: \"kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.720014 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-config\") pod \"0f0e3469-698d-424b-b4de-142a61f90b11\" (UID: \"0f0e3469-698d-424b-b4de-142a61f90b11\") "
Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.724684 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64" (OuterVolumeSpecName: "kube-api-access-mbk64") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "kube-api-access-mbk64". PluginName "kubernetes.io/projected", VolumeGidValue ""
"kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64" (OuterVolumeSpecName: "kube-api-access-mbk64") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "kube-api-access-mbk64". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.777477 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.778805 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.791885 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.792623 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-config" (OuterVolumeSpecName: "config") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.797611 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.802532 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0f0e3469-698d-424b-b4de-142a61f90b11" (UID: "0f0e3469-698d-424b-b4de-142a61f90b11"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822393 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822458 3552 reconciler_common.go:300] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822475 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822491 3552 reconciler_common.go:300] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-dns-svc\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822503 3552 reconciler_common.go:300] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822516 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mbk64\" (UniqueName: \"kubernetes.io/projected/0f0e3469-698d-424b-b4de-142a61f90b11-kube-api-access-mbk64\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:16 crc kubenswrapper[3552]: I0320 15:53:16.822528 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f0e3469-698d-424b-b4de-142a61f90b11-config\") on node \"crc\" DevicePath \"\"" Mar 20 15:53:17 crc kubenswrapper[3552]: I0320 15:53:17.618250 3552 util.go:48] "No ready sandbox for pod can be found. 
Mar 20 15:53:17 crc kubenswrapper[3552]: I0320 15:53:17.709043 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d857c9dbc-t8lz7"]
Mar 20 15:53:17 crc kubenswrapper[3552]: I0320 15:53:17.717929 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d857c9dbc-t8lz7"]
Mar 20 15:53:19 crc kubenswrapper[3552]: I0320 15:53:19.447038 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" path="/var/lib/kubelet/pods/0f0e3469-698d-424b-b4de-142a61f90b11/volumes"
Mar 20 15:53:22 crc kubenswrapper[3552]: I0320 15:53:22.691750 3552 scope.go:117] "RemoveContainer" containerID="0347dbb00e01a8a90af7702baeab73fc3bd47a1218381f6fe48198f266e1ac2e"
Mar 20 15:53:22 crc kubenswrapper[3552]: I0320 15:53:22.757511 3552 scope.go:117] "RemoveContainer" containerID="faa0c566e51de8183bc011bbfaf90e9b9ce83f79a0a83437d577d076ecfab5e3"
Mar 20 15:53:22 crc kubenswrapper[3552]: I0320 15:53:22.846279 3552 scope.go:117] "RemoveContainer" containerID="b19470232bec3ad152455755485915b5992d607b8fb9c8f08865b93bf743c298"
Mar 20 15:53:25 crc kubenswrapper[3552]: I0320 15:53:25.686507 3552 generic.go:334] "Generic (PLEG): container finished" podID="f71c29cd-5055-41bb-b3f8-6183a9be2b7f" containerID="a163d68e1c4e18d6dc2413245b167a1b1f5106a8dfe13cd79fcbfc8e9afc95f4" exitCode=0
Mar 20 15:53:25 crc kubenswrapper[3552]: I0320 15:53:25.687225 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f71c29cd-5055-41bb-b3f8-6183a9be2b7f","Type":"ContainerDied","Data":"a163d68e1c4e18d6dc2413245b167a1b1f5106a8dfe13cd79fcbfc8e9afc95f4"}
Mar 20 15:53:25 crc kubenswrapper[3552]: I0320 15:53:25.689726 3552 generic.go:334] "Generic (PLEG): container finished" podID="58197235-68b0-45dd-9df3-6825c76c4df8" containerID="a85302128f82babdc16853aa64e420893bd404eeb7b3e4dd1981557a6570ca6e" exitCode=0
Mar 20 15:53:25 crc kubenswrapper[3552]: I0320 15:53:25.689747 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58197235-68b0-45dd-9df3-6825c76c4df8","Type":"ContainerDied","Data":"a85302128f82babdc16853aa64e420893bd404eeb7b3e4dd1981557a6570ca6e"}
Mar 20 15:53:26 crc kubenswrapper[3552]: I0320 15:53:26.698981 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f71c29cd-5055-41bb-b3f8-6183a9be2b7f","Type":"ContainerStarted","Data":"a684e2d6894b18becde6cc30827f9a544e4ac79642f81271a285cdcb7848f34a"}
Mar 20 15:53:26 crc kubenswrapper[3552]: I0320 15:53:26.701765 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58197235-68b0-45dd-9df3-6825c76c4df8","Type":"ContainerStarted","Data":"f042f47673d208f3e365eb105340dadf89aee4ef4b66829f22b88e94f13fa5f8"}
Mar 20 15:53:26 crc kubenswrapper[3552]: I0320 15:53:26.726232 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.726191718 podStartE2EDuration="36.726191718s" podCreationTimestamp="2026-03-20 15:52:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:53:26.719802828 +0000 UTC m=+1706.413499668" watchObservedRunningTime="2026-03-20 15:53:26.726191718 +0000 UTC m=+1706.419888548"
Mar 20 15:53:26 crc kubenswrapper[3552]: I0320 15:53:26.746640 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.746598253 podStartE2EDuration="37.746598253s" podCreationTimestamp="2026-03-20 15:52:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 15:53:26.742623997 +0000 UTC m=+1706.436320837" watchObservedRunningTime="2026-03-20 15:53:26.746598253 +0000 UTC m=+1706.440295083"
Mar 20 15:53:28 crc kubenswrapper[3552]: I0320 15:53:28.431565 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"
Mar 20 15:53:28 crc kubenswrapper[3552]: E0320 15:53:28.433343 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 15:53:30 crc kubenswrapper[3552]: I0320 15:53:30.301086 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Mar 20 15:53:30 crc kubenswrapper[3552]: I0320 15:53:30.758323 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.757725 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"]
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.758344 3552 topology_manager.go:215] "Topology Admit Handler" podUID="fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" podNamespace="openstack" podName="repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: E0320 15:53:36.758621 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" containerName="dnsmasq-dns"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.758639 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" containerName="dnsmasq-dns"
Mar 20 15:53:36 crc kubenswrapper[3552]: E0320 15:53:36.758662 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="39f1f950-065d-4899-a254-363f2279cde1" containerName="dnsmasq-dns"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.758671 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="39f1f950-065d-4899-a254-363f2279cde1" containerName="dnsmasq-dns"
Mar 20 15:53:36 crc kubenswrapper[3552]: E0320 15:53:36.758700 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" containerName="init"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.758711 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" containerName="init"
Mar 20 15:53:36 crc kubenswrapper[3552]: E0320 15:53:36.758721 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="39f1f950-065d-4899-a254-363f2279cde1" containerName="init"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.758728 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="39f1f950-065d-4899-a254-363f2279cde1" containerName="init"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.758972 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="39f1f950-065d-4899-a254-363f2279cde1" containerName="dnsmasq-dns"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.759000 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f0e3469-698d-424b-b4de-142a61f90b11" containerName="dnsmasq-dns"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.759766 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.771273 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.771273 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.771594 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.772864 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.774492 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"]
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.817197 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.817302 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.817428 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqqq6\" (UniqueName: \"kubernetes.io/projected/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-kube-api-access-kqqq6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.817480 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.918898 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.918990 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.919035 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kqqq6\" (UniqueName: \"kubernetes.io/projected/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-kube-api-access-kqqq6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.919071 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.925318 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.926005 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.926423 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:36 crc kubenswrapper[3552]: I0320 15:53:36.937323 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqqq6\" (UniqueName: \"kubernetes.io/projected/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-kube-api-access-kqqq6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:37 crc kubenswrapper[3552]: I0320 15:53:37.080247 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:53:37 crc kubenswrapper[3552]: I0320 15:53:37.471040 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"]
Mar 20 15:53:37 crc kubenswrapper[3552]: I0320 15:53:37.780463 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk" event={"ID":"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2","Type":"ContainerStarted","Data":"1c4075ca3717e118225a8238316087dc0fe4aa36582338c983f966800d357a37"}
Mar 20 15:53:40 crc kubenswrapper[3552]: I0320 15:53:40.303679 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Mar 20 15:53:40 crc kubenswrapper[3552]: I0320 15:53:40.760994 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Mar 20 15:53:43 crc kubenswrapper[3552]: I0320 15:53:43.431893 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"
Mar 20 15:53:43 crc kubenswrapper[3552]: E0320 15:53:43.433126 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 15:53:49 crc kubenswrapper[3552]: I0320 15:53:49.887961 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk" event={"ID":"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2","Type":"ContainerStarted","Data":"90beefb0a636fcc3b2e7d1111396ced65d731e924113092fa5b7252cef1931fb"}
Mar 20 15:53:49 crc kubenswrapper[3552]: I0320 15:53:49.922863 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk" podStartSLOduration=2.47502801 podStartE2EDuration="13.922784159s" podCreationTimestamp="2026-03-20 15:53:36 +0000 UTC" firstStartedPulling="2026-03-20 15:53:37.479005006 +0000 UTC m=+1717.172701846" lastFinishedPulling="2026-03-20 15:53:48.926761165 +0000 UTC m=+1728.620457995" observedRunningTime="2026-03-20 15:53:49.903581366 +0000 UTC m=+1729.597278216" watchObservedRunningTime="2026-03-20 15:53:49.922784159 +0000 UTC m=+1729.616481039"
Mar 20 15:53:57 crc kubenswrapper[3552]: I0320 15:53:57.430750 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b"
Mar 20 15:53:57 crc kubenswrapper[3552]: E0320 15:53:57.431801 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 15:54:00 crc kubenswrapper[3552]: I0320 15:54:00.986008 3552 generic.go:334] "Generic (PLEG): container finished" podID="fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" containerID="90beefb0a636fcc3b2e7d1111396ced65d731e924113092fa5b7252cef1931fb" exitCode=0
Mar 20 15:54:00 crc kubenswrapper[3552]: I0320 15:54:00.986082 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk" event={"ID":"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2","Type":"ContainerDied","Data":"90beefb0a636fcc3b2e7d1111396ced65d731e924113092fa5b7252cef1931fb"}
Mar 20 15:54:01 crc kubenswrapper[3552]: I0320 15:54:01.322696 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 15:54:01 crc kubenswrapper[3552]: I0320 15:54:01.323176 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 15:54:01 crc kubenswrapper[3552]: I0320 15:54:01.323220 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 15:54:01 crc kubenswrapper[3552]: I0320 15:54:01.323246 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 15:54:01 crc kubenswrapper[3552]: I0320 15:54:01.323280 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.423545 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk"
Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.470590 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-inventory\") pod \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") "
Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.470756 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-repo-setup-combined-ca-bundle\") pod \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") "
Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.470788 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqqq6\" (UniqueName: \"kubernetes.io/projected/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-kube-api-access-kqqq6\") pod \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") "
Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.470820 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-ssh-key-openstack-edpm-ipam\") pod \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\" (UID: \"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2\") "
Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.485889 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-kube-api-access-kqqq6" (OuterVolumeSpecName: "kube-api-access-kqqq6") pod "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" (UID: "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2"). InnerVolumeSpecName "kube-api-access-kqqq6". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.486222 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" (UID: "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.504591 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-inventory" (OuterVolumeSpecName: "inventory") pod "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" (UID: "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.510718 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" (UID: "fd195b5e-d78c-40c1-8e30-e9f2464b2eb2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.572460 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.572496 3552 reconciler_common.go:300] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.572510 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kqqq6\" (UniqueName: \"kubernetes.io/projected/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-kube-api-access-kqqq6\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:02 crc kubenswrapper[3552]: I0320 15:54:02.572521 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fd195b5e-d78c-40c1-8e30-e9f2464b2eb2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.005489 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk" event={"ID":"fd195b5e-d78c-40c1-8e30-e9f2464b2eb2","Type":"ContainerDied","Data":"1c4075ca3717e118225a8238316087dc0fe4aa36582338c983f966800d357a37"} Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.005522 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c4075ca3717e118225a8238316087dc0fe4aa36582338c983f966800d357a37" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.005557 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.093493 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2"] Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.094028 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0b3fef6a-f2e0-4b73-8254-f79a9dd63846" podNamespace="openstack" podName="redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: E0320 15:54:03.094292 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.094309 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.094545 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd195b5e-d78c-40c1-8e30-e9f2464b2eb2" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.095149 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.097926 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.098468 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.100044 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.101253 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.108595 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2"] Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.182708 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.182919 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.182975 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4h6s\" (UniqueName: \"kubernetes.io/projected/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-kube-api-access-w4h6s\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.284683 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.284760 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-w4h6s\" (UniqueName: \"kubernetes.io/projected/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-kube-api-access-w4h6s\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.284867 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.289696 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.290014 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.306264 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4h6s\" (UniqueName: \"kubernetes.io/projected/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-kube-api-access-w4h6s\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rdsr2\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.420784 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:03 crc kubenswrapper[3552]: I0320 15:54:03.989022 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2"] Mar 20 15:54:03 crc kubenswrapper[3552]: W0320 15:54:03.990851 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b3fef6a_f2e0_4b73_8254_f79a9dd63846.slice/crio-4174bdc77313ce846eb2aba668ff330b9705d080ec11b6d20e61cf4db72138ad WatchSource:0}: Error finding container 4174bdc77313ce846eb2aba668ff330b9705d080ec11b6d20e61cf4db72138ad: Status 404 returned error can't find the container with id 4174bdc77313ce846eb2aba668ff330b9705d080ec11b6d20e61cf4db72138ad Mar 20 15:54:04 crc kubenswrapper[3552]: I0320 15:54:04.017903 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" event={"ID":"0b3fef6a-f2e0-4b73-8254-f79a9dd63846","Type":"ContainerStarted","Data":"4174bdc77313ce846eb2aba668ff330b9705d080ec11b6d20e61cf4db72138ad"} Mar 20 15:54:05 crc kubenswrapper[3552]: I0320 15:54:05.037852 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" event={"ID":"0b3fef6a-f2e0-4b73-8254-f79a9dd63846","Type":"ContainerStarted","Data":"f09500ac3f8dfba30580013d40f266d08978482ef40dd0b92494af83a88c5e1f"} Mar 20 15:54:05 crc kubenswrapper[3552]: I0320 15:54:05.062624 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" podStartSLOduration=1.734755486 podStartE2EDuration="2.062576222s" podCreationTimestamp="2026-03-20 15:54:03 +0000 UTC" firstStartedPulling="2026-03-20 15:54:03.993669933 +0000 UTC m=+1743.687366793" lastFinishedPulling="2026-03-20 15:54:04.321490659 +0000 UTC m=+1744.015187529" observedRunningTime="2026-03-20 15:54:05.059299435 +0000 UTC m=+1744.752996325" watchObservedRunningTime="2026-03-20 15:54:05.062576222 +0000 UTC m=+1744.756273072" Mar 20 15:54:08 crc kubenswrapper[3552]: I0320 15:54:08.083670 3552 generic.go:334] "Generic (PLEG): container finished" podID="0b3fef6a-f2e0-4b73-8254-f79a9dd63846" containerID="f09500ac3f8dfba30580013d40f266d08978482ef40dd0b92494af83a88c5e1f" exitCode=0 Mar 20 15:54:08 crc kubenswrapper[3552]: I0320 15:54:08.083824 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" event={"ID":"0b3fef6a-f2e0-4b73-8254-f79a9dd63846","Type":"ContainerDied","Data":"f09500ac3f8dfba30580013d40f266d08978482ef40dd0b92494af83a88c5e1f"} Mar 20 15:54:08 crc kubenswrapper[3552]: I0320 15:54:08.431702 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:54:08 crc kubenswrapper[3552]: E0320 15:54:08.432176 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.450538 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.615965 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4h6s\" (UniqueName: \"kubernetes.io/projected/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-kube-api-access-w4h6s\") pod \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.616018 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-inventory\") pod \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.616124 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-ssh-key-openstack-edpm-ipam\") pod \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\" (UID: \"0b3fef6a-f2e0-4b73-8254-f79a9dd63846\") " Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.626141 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-kube-api-access-w4h6s" (OuterVolumeSpecName: "kube-api-access-w4h6s") pod "0b3fef6a-f2e0-4b73-8254-f79a9dd63846" (UID: "0b3fef6a-f2e0-4b73-8254-f79a9dd63846"). InnerVolumeSpecName "kube-api-access-w4h6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.650604 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0b3fef6a-f2e0-4b73-8254-f79a9dd63846" (UID: "0b3fef6a-f2e0-4b73-8254-f79a9dd63846"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.651593 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-inventory" (OuterVolumeSpecName: "inventory") pod "0b3fef6a-f2e0-4b73-8254-f79a9dd63846" (UID: "0b3fef6a-f2e0-4b73-8254-f79a9dd63846"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.718261 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-w4h6s\" (UniqueName: \"kubernetes.io/projected/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-kube-api-access-w4h6s\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.718296 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:09 crc kubenswrapper[3552]: I0320 15:54:09.718310 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b3fef6a-f2e0-4b73-8254-f79a9dd63846-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.104980 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" event={"ID":"0b3fef6a-f2e0-4b73-8254-f79a9dd63846","Type":"ContainerDied","Data":"4174bdc77313ce846eb2aba668ff330b9705d080ec11b6d20e61cf4db72138ad"} Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.105027 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4174bdc77313ce846eb2aba668ff330b9705d080ec11b6d20e61cf4db72138ad" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.105084 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rdsr2" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.227383 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn"] Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.227630 3552 topology_manager.go:215] "Topology Admit Handler" podUID="91e771c4-0b23-4802-ba94-e2596a5b688f" podNamespace="openstack" podName="bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: E0320 15:54:10.228006 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0b3fef6a-f2e0-4b73-8254-f79a9dd63846" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.228031 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3fef6a-f2e0-4b73-8254-f79a9dd63846" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.228304 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b3fef6a-f2e0-4b73-8254-f79a9dd63846" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.229219 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.232103 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.232240 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.234469 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.234468 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.248722 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn"] Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.329531 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.329707 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgd6t\" (UniqueName: \"kubernetes.io/projected/91e771c4-0b23-4802-ba94-e2596a5b688f-kube-api-access-rgd6t\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.329777 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.329824 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.431633 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rgd6t\" (UniqueName: \"kubernetes.io/projected/91e771c4-0b23-4802-ba94-e2596a5b688f-kube-api-access-rgd6t\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.431690 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.431717 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.431821 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.436063 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.436248 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.436587 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.455380 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgd6t\" (UniqueName: \"kubernetes.io/projected/91e771c4-0b23-4802-ba94-e2596a5b688f-kube-api-access-rgd6t\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:10 crc kubenswrapper[3552]: I0320 15:54:10.551378 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:54:11 crc kubenswrapper[3552]: I0320 15:54:11.160493 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn"] Mar 20 15:54:12 crc kubenswrapper[3552]: I0320 15:54:12.123887 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" event={"ID":"91e771c4-0b23-4802-ba94-e2596a5b688f","Type":"ContainerStarted","Data":"ee5ee36e4aaa1df25d54900c0f751589135d5c20f06f5f3220e7691e323f006b"} Mar 20 15:54:12 crc kubenswrapper[3552]: I0320 15:54:12.124196 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" event={"ID":"91e771c4-0b23-4802-ba94-e2596a5b688f","Type":"ContainerStarted","Data":"3f34dc0251db70da4b8a580463687301b76214afec051c9b0af00f364aad26ef"} Mar 20 15:54:12 crc kubenswrapper[3552]: I0320 15:54:12.140335 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" podStartSLOduration=1.582233717 podStartE2EDuration="2.14028051s" podCreationTimestamp="2026-03-20 15:54:10 +0000 UTC" firstStartedPulling="2026-03-20 15:54:11.17042921 +0000 UTC m=+1750.864126060" lastFinishedPulling="2026-03-20 15:54:11.728476033 +0000 UTC m=+1751.422172853" observedRunningTime="2026-03-20 15:54:12.137399153 +0000 UTC m=+1751.831095983" watchObservedRunningTime="2026-03-20 15:54:12.14028051 +0000 UTC m=+1751.833977340" Mar 20 15:54:23 crc kubenswrapper[3552]: I0320 15:54:23.095169 3552 scope.go:117] "RemoveContainer" containerID="f341dabebe9ed0ad5bb2ee9a8d89a89629dbfcc253e54f96f0383b88efadd11e" Mar 20 15:54:23 crc kubenswrapper[3552]: I0320 15:54:23.439768 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:54:23 crc kubenswrapper[3552]: E0320 15:54:23.441125 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:54:38 crc kubenswrapper[3552]: I0320 15:54:38.430651 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:54:38 crc kubenswrapper[3552]: E0320 15:54:38.432048 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:54:51 crc kubenswrapper[3552]: I0320 15:54:51.441107 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:54:51 crc kubenswrapper[3552]: E0320 15:54:51.442983 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:55:01 crc kubenswrapper[3552]: I0320 15:55:01.323656 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:55:01 crc kubenswrapper[3552]: I0320 15:55:01.324342 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:55:01 crc kubenswrapper[3552]: I0320 15:55:01.324385 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:55:01 crc kubenswrapper[3552]: I0320 15:55:01.324493 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:55:01 crc kubenswrapper[3552]: I0320 15:55:01.324520 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:55:02 crc kubenswrapper[3552]: I0320 15:55:02.430290 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:55:02 crc kubenswrapper[3552]: E0320 15:55:02.431202 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:55:16 crc kubenswrapper[3552]: I0320 15:55:16.431633 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:55:16 crc kubenswrapper[3552]: E0320 15:55:16.432736 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:55:23 crc kubenswrapper[3552]: I0320 15:55:23.177490 3552 scope.go:117] "RemoveContainer" containerID="faa13e4909c7f27c43f26d7faf1ff882bc40db36a83156367863990e5c4ad153" Mar 20 15:55:23 crc kubenswrapper[3552]: I0320 15:55:23.232325 3552 scope.go:117] "RemoveContainer" containerID="8f5c9b67b5f5b8e7408be1dd5ce4379ec13dcd233c3c719e9c200b2a858b8642" Mar 20 15:55:29 crc kubenswrapper[3552]: I0320 15:55:29.431178 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:55:29 crc kubenswrapper[3552]: E0320 15:55:29.432349 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 
20 15:55:40 crc kubenswrapper[3552]: I0320 15:55:40.430185 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:55:40 crc kubenswrapper[3552]: E0320 15:55:40.431199 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:55:51 crc kubenswrapper[3552]: I0320 15:55:51.434291 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:55:51 crc kubenswrapper[3552]: E0320 15:55:51.436939 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:56:01 crc kubenswrapper[3552]: I0320 15:56:01.326189 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:56:01 crc kubenswrapper[3552]: I0320 15:56:01.326770 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:56:01 crc kubenswrapper[3552]: I0320 15:56:01.326802 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:56:01 crc kubenswrapper[3552]: I0320 15:56:01.326822 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:56:01 crc kubenswrapper[3552]: I0320 15:56:01.326864 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:56:05 crc kubenswrapper[3552]: I0320 15:56:05.431581 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:56:05 crc kubenswrapper[3552]: E0320 15:56:05.432801 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:56:19 crc kubenswrapper[3552]: I0320 15:56:19.430270 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:56:19 crc kubenswrapper[3552]: E0320 15:56:19.431504 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:56:23 crc kubenswrapper[3552]: I0320 15:56:23.324602 3552 scope.go:117] "RemoveContainer" containerID="36a8e5daffc0ba2fa859692b09141c1230ad5a69794a2cf64c593a5c1b19ed49" Mar 20 15:56:23 crc kubenswrapper[3552]: I0320 15:56:23.409597 3552 scope.go:117] "RemoveContainer" containerID="5f8c3cf550d8ef34d533693e0dcd2e0040bcd974fa417f0715cb8b48f9906109" Mar 20 15:56:23 crc kubenswrapper[3552]: I0320 15:56:23.448670 3552 scope.go:117] "RemoveContainer" containerID="2b8a5936718b6d1697b1ebff9cc7c18b94d099d30df4be65ef9bfd16940e8bca" Mar 20 15:56:23 crc kubenswrapper[3552]: I0320 15:56:23.489953 3552 scope.go:117] "RemoveContainer" containerID="50dae54f02da9e300e6d56f253f319e2a6dd546bc3994a5b206177495ad97a7e" Mar 20 15:56:33 crc kubenswrapper[3552]: I0320 15:56:33.076723 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-4t82z"] Mar 20 15:56:33 crc kubenswrapper[3552]: I0320 15:56:33.087198 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-6cf8-account-create-update-n8mhq"] Mar 20 15:56:33 crc kubenswrapper[3552]: I0320 15:56:33.094973 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-4t82z"] Mar 20 15:56:33 crc kubenswrapper[3552]: I0320 15:56:33.102602 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-6cf8-account-create-update-n8mhq"] Mar 20 15:56:33 crc kubenswrapper[3552]: I0320 15:56:33.443938 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3121cfff-17ad-42d7-b737-f68b3de66a9f" path="/var/lib/kubelet/pods/3121cfff-17ad-42d7-b737-f68b3de66a9f/volumes" Mar 20 15:56:33 crc kubenswrapper[3552]: I0320 15:56:33.447029 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dc74a35-3b06-4f41-9684-f35bbdd1078b" path="/var/lib/kubelet/pods/8dc74a35-3b06-4f41-9684-f35bbdd1078b/volumes" Mar 20 15:56:34 crc kubenswrapper[3552]: I0320 15:56:34.430643 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:56:34 crc kubenswrapper[3552]: E0320 15:56:34.431697 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:56:45 crc kubenswrapper[3552]: I0320 15:56:45.062971 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-ll6vw"] Mar 20 15:56:45 crc kubenswrapper[3552]: I0320 15:56:45.077346 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/keystone-2002-account-create-update-mnjhb"] Mar 20 15:56:45 crc kubenswrapper[3552]: I0320 15:56:45.088736 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-2002-account-create-update-mnjhb"] Mar 20 15:56:45 crc kubenswrapper[3552]: I0320 15:56:45.098711 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-ll6vw"] Mar 20 15:56:45 crc kubenswrapper[3552]: I0320 15:56:45.458220 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e0924e3-d91e-44d3-a060-0e400a85cfc9" 
path="/var/lib/kubelet/pods/3e0924e3-d91e-44d3-a060-0e400a85cfc9/volumes" Mar 20 15:56:45 crc kubenswrapper[3552]: I0320 15:56:45.460578 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55543b44-cb0a-4ffe-997e-d94d126eb91b" path="/var/lib/kubelet/pods/55543b44-cb0a-4ffe-997e-d94d126eb91b/volumes" Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.041340 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-zxrpw"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.057620 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-c3b1-account-create-update-2ngjl"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.068366 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/placement-449e-account-create-update-r8fxj"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.075924 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-84nn5"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.082937 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-zxrpw"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.090845 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/placement-449e-account-create-update-r8fxj"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.097854 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-c3b1-account-create-update-2ngjl"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.104741 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-84nn5"] Mar 20 15:56:46 crc kubenswrapper[3552]: I0320 15:56:46.431016 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:56:46 crc kubenswrapper[3552]: E0320 15:56:46.431576 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:56:47 crc kubenswrapper[3552]: I0320 15:56:47.441011 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d13a1a3-677a-43ea-bb63-305d67463ec7" path="/var/lib/kubelet/pods/6d13a1a3-677a-43ea-bb63-305d67463ec7/volumes" Mar 20 15:56:47 crc kubenswrapper[3552]: I0320 15:56:47.443772 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8da21953-3ce2-4d5a-abab-6bf0fc7f16aa" path="/var/lib/kubelet/pods/8da21953-3ce2-4d5a-abab-6bf0fc7f16aa/volumes" Mar 20 15:56:47 crc kubenswrapper[3552]: I0320 15:56:47.446452 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa27c1f0-656c-4972-aeb9-8279bd8cc6da" path="/var/lib/kubelet/pods/aa27c1f0-656c-4972-aeb9-8279bd8cc6da/volumes" Mar 20 15:56:47 crc kubenswrapper[3552]: I0320 15:56:47.448332 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c56721e1-6189-4d50-bb51-cdf6a0f163a8" path="/var/lib/kubelet/pods/c56721e1-6189-4d50-bb51-cdf6a0f163a8/volumes" Mar 20 15:56:51 crc kubenswrapper[3552]: I0320 15:56:51.049436 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-q4mwz"] Mar 20 15:56:51 crc kubenswrapper[3552]: 
I0320 15:56:51.064702 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-q4mwz"] Mar 20 15:56:51 crc kubenswrapper[3552]: I0320 15:56:51.452211 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22af4aca-4214-48f2-9741-44a70d1c7245" path="/var/lib/kubelet/pods/22af4aca-4214-48f2-9741-44a70d1c7245/volumes" Mar 20 15:57:01 crc kubenswrapper[3552]: I0320 15:57:01.327160 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:57:01 crc kubenswrapper[3552]: I0320 15:57:01.327705 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:57:01 crc kubenswrapper[3552]: I0320 15:57:01.327731 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:57:01 crc kubenswrapper[3552]: I0320 15:57:01.327777 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:57:01 crc kubenswrapper[3552]: I0320 15:57:01.327815 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:57:01 crc kubenswrapper[3552]: I0320 15:57:01.435865 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:57:01 crc kubenswrapper[3552]: E0320 15:57:01.436563 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:57:12 crc kubenswrapper[3552]: I0320 15:57:12.053648 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-1ec3-account-create-update-q4lwl"] Mar 20 15:57:12 crc kubenswrapper[3552]: I0320 15:57:12.068687 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-tpfhd"] Mar 20 15:57:12 crc kubenswrapper[3552]: I0320 15:57:12.076230 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-tpfhd"] Mar 20 15:57:12 crc kubenswrapper[3552]: I0320 15:57:12.085154 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-1ec3-account-create-update-q4lwl"] Mar 20 15:57:13 crc kubenswrapper[3552]: I0320 15:57:13.445527 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="682a86ca-28d3-4309-95a2-80458ddd5e31" path="/var/lib/kubelet/pods/682a86ca-28d3-4309-95a2-80458ddd5e31/volumes" Mar 20 15:57:13 crc kubenswrapper[3552]: I0320 15:57:13.450439 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4a0b1bc-7138-4e84-b566-86bbe003cd2c" path="/var/lib/kubelet/pods/c4a0b1bc-7138-4e84-b566-86bbe003cd2c/volumes" Mar 20 15:57:15 crc kubenswrapper[3552]: I0320 15:57:15.435809 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:57:15 crc kubenswrapper[3552]: E0320 15:57:15.437002 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:57:16 crc kubenswrapper[3552]: I0320 15:57:16.033356 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-eb5f-account-create-update-x72h6"] Mar 20 15:57:16 crc kubenswrapper[3552]: I0320 15:57:16.044886 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-eb5f-account-create-update-x72h6"] Mar 20 15:57:17 crc kubenswrapper[3552]: I0320 15:57:17.442496 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="823cdc27-b1a5-4032-8081-3d0466e6a40c" path="/var/lib/kubelet/pods/823cdc27-b1a5-4032-8081-3d0466e6a40c/volumes" Mar 20 15:57:20 crc kubenswrapper[3552]: I0320 15:57:20.033769 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/barbican-2e8b-account-create-update-nl2l7"] Mar 20 15:57:20 crc kubenswrapper[3552]: I0320 15:57:20.048487 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-2e8b-account-create-update-nl2l7"] Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.034748 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-t949z"] Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.048215 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-h9hxv"] Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.057822 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-t949z"] Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.067089 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-h9hxv"] Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.449729 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12bbf46a-6ef5-49fb-98c2-dd0e48affa63" path="/var/lib/kubelet/pods/12bbf46a-6ef5-49fb-98c2-dd0e48affa63/volumes" Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.455465 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97cb48d9-1b5c-45e8-b8e7-a30354047d3d" path="/var/lib/kubelet/pods/97cb48d9-1b5c-45e8-b8e7-a30354047d3d/volumes" Mar 20 15:57:21 crc kubenswrapper[3552]: I0320 15:57:21.458857 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c88f4219-6cb4-4fd9-8414-239ef9a7c25e" path="/var/lib/kubelet/pods/c88f4219-6cb4-4fd9-8414-239ef9a7c25e/volumes" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.601690 3552 scope.go:117] "RemoveContainer" containerID="23a3d250d223953e770ae5ebd80c62e8fd019941008d2385e4dd5cc15af2210c" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.645610 3552 scope.go:117] "RemoveContainer" containerID="95e6f9f09b1adf70685ba8b0dc1c4bd430acf4fbbca2999e959ea6783954846e" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.701080 3552 scope.go:117] "RemoveContainer" containerID="309a7f8f52133ac9bff7c70cd77d3902694211b4c28b187f526dd5afc5ed21d0" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.752898 3552 scope.go:117] "RemoveContainer" containerID="b426825943a654d5621b1a0be0a6b9382b87b477a65f0d8db585fd124aa352c5" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.781467 3552 scope.go:117] "RemoveContainer" containerID="e2ed61726af561dafe4831dea479f72e5df2afcdc85ff550a691541feb211048" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 
15:57:23.821329 3552 scope.go:117] "RemoveContainer" containerID="167b9625a6956f61ebbc3f161ae18e1c62999da2959da536f4d12cf7af6fc059" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.867144 3552 scope.go:117] "RemoveContainer" containerID="bb0582e5adc02085eb8cf47852e5e9da42051e43f99309362e55cb9d24f7a99c" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.915335 3552 scope.go:117] "RemoveContainer" containerID="367b7a6fb761a5ffbd64b3e9ac3b844b1614ca34094a51b4f7b87ef7d33edec4" Mar 20 15:57:23 crc kubenswrapper[3552]: I0320 15:57:23.963468 3552 scope.go:117] "RemoveContainer" containerID="0791022f3fc145812764d5cb666c1b28d78717f628f50c5bcf062b59a20e6eae" Mar 20 15:57:24 crc kubenswrapper[3552]: I0320 15:57:24.001264 3552 scope.go:117] "RemoveContainer" containerID="dd9ba912db86217994c5b6b924300df66f8da27b4c706444731209518cf43011" Mar 20 15:57:24 crc kubenswrapper[3552]: I0320 15:57:24.054102 3552 scope.go:117] "RemoveContainer" containerID="7055113e467cd08a20577eb913ad8a320e9a8311e69fbe9f950325829d9b540c" Mar 20 15:57:24 crc kubenswrapper[3552]: I0320 15:57:24.081225 3552 scope.go:117] "RemoveContainer" containerID="e691b528c59ae7f167e883811d6b8c8a0d8c23280fa9cff74dd75230c5a589d1" Mar 20 15:57:24 crc kubenswrapper[3552]: I0320 15:57:24.111600 3552 scope.go:117] "RemoveContainer" containerID="7722e7a3f95ddabd6ae756fa839f757e13141bfa04a46738a3449bc3bcd308bb" Mar 20 15:57:24 crc kubenswrapper[3552]: I0320 15:57:24.186795 3552 scope.go:117] "RemoveContainer" containerID="3126c7c8e97ac4e36fef9be8bc5a4791ac35fcecdb186a2b5a9d88c92059776e" Mar 20 15:57:24 crc kubenswrapper[3552]: I0320 15:57:24.238684 3552 scope.go:117] "RemoveContainer" containerID="bed04f0fd40a44758c7f282f8f3c568253168401c10ced2a33beedb681dc067e" Mar 20 15:57:28 crc kubenswrapper[3552]: I0320 15:57:28.048814 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-4ntxp"] Mar 20 15:57:28 crc kubenswrapper[3552]: I0320 15:57:28.058182 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-4ntxp"] Mar 20 15:57:28 crc kubenswrapper[3552]: I0320 15:57:28.432332 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:57:28 crc kubenswrapper[3552]: E0320 15:57:28.434035 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:57:29 crc kubenswrapper[3552]: I0320 15:57:29.441523 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e04aaab3-651f-4523-ab35-250a33f54f4d" path="/var/lib/kubelet/pods/e04aaab3-651f-4523-ab35-250a33f54f4d/volumes" Mar 20 15:57:31 crc kubenswrapper[3552]: I0320 15:57:31.564364 3552 generic.go:334] "Generic (PLEG): container finished" podID="91e771c4-0b23-4802-ba94-e2596a5b688f" containerID="ee5ee36e4aaa1df25d54900c0f751589135d5c20f06f5f3220e7691e323f006b" exitCode=0 Mar 20 15:57:31 crc kubenswrapper[3552]: I0320 15:57:31.564472 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" 
event={"ID":"91e771c4-0b23-4802-ba94-e2596a5b688f","Type":"ContainerDied","Data":"ee5ee36e4aaa1df25d54900c0f751589135d5c20f06f5f3220e7691e323f006b"} Mar 20 15:57:32 crc kubenswrapper[3552]: I0320 15:57:32.900624 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.007546 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-bootstrap-combined-ca-bundle\") pod \"91e771c4-0b23-4802-ba94-e2596a5b688f\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.007622 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-ssh-key-openstack-edpm-ipam\") pod \"91e771c4-0b23-4802-ba94-e2596a5b688f\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.007773 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-inventory\") pod \"91e771c4-0b23-4802-ba94-e2596a5b688f\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.007824 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgd6t\" (UniqueName: \"kubernetes.io/projected/91e771c4-0b23-4802-ba94-e2596a5b688f-kube-api-access-rgd6t\") pod \"91e771c4-0b23-4802-ba94-e2596a5b688f\" (UID: \"91e771c4-0b23-4802-ba94-e2596a5b688f\") " Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.016266 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "91e771c4-0b23-4802-ba94-e2596a5b688f" (UID: "91e771c4-0b23-4802-ba94-e2596a5b688f"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.016338 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91e771c4-0b23-4802-ba94-e2596a5b688f-kube-api-access-rgd6t" (OuterVolumeSpecName: "kube-api-access-rgd6t") pod "91e771c4-0b23-4802-ba94-e2596a5b688f" (UID: "91e771c4-0b23-4802-ba94-e2596a5b688f"). InnerVolumeSpecName "kube-api-access-rgd6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.039788 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-inventory" (OuterVolumeSpecName: "inventory") pod "91e771c4-0b23-4802-ba94-e2596a5b688f" (UID: "91e771c4-0b23-4802-ba94-e2596a5b688f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.046933 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "91e771c4-0b23-4802-ba94-e2596a5b688f" (UID: "91e771c4-0b23-4802-ba94-e2596a5b688f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.110118 3552 reconciler_common.go:300] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.110169 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.110185 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91e771c4-0b23-4802-ba94-e2596a5b688f-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.110200 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rgd6t\" (UniqueName: \"kubernetes.io/projected/91e771c4-0b23-4802-ba94-e2596a5b688f-kube-api-access-rgd6t\") on node \"crc\" DevicePath \"\"" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.583252 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" event={"ID":"91e771c4-0b23-4802-ba94-e2596a5b688f","Type":"ContainerDied","Data":"3f34dc0251db70da4b8a580463687301b76214afec051c9b0af00f364aad26ef"} Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.583286 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f34dc0251db70da4b8a580463687301b76214afec051c9b0af00f364aad26ef" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.583283 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-89dkn" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.690436 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz"] Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.690604 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" podNamespace="openstack" podName="download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: E0320 15:57:33.690840 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="91e771c4-0b23-4802-ba94-e2596a5b688f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.690853 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="91e771c4-0b23-4802-ba94-e2596a5b688f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.691040 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="91e771c4-0b23-4802-ba94-e2596a5b688f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.691677 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.694570 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.695699 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.696463 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.696853 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.708116 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz"] Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.826932 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ckst\" (UniqueName: \"kubernetes.io/projected/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-kube-api-access-5ckst\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.827034 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.827289 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.928945 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.929046 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.929156 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-5ckst\" (UniqueName: \"kubernetes.io/projected/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-kube-api-access-5ckst\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.934838 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.935220 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:33 crc kubenswrapper[3552]: I0320 15:57:33.947114 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ckst\" (UniqueName: \"kubernetes.io/projected/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-kube-api-access-5ckst\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:34 crc kubenswrapper[3552]: I0320 15:57:34.007831 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:57:34 crc kubenswrapper[3552]: I0320 15:57:34.556753 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz"] Mar 20 15:57:34 crc kubenswrapper[3552]: I0320 15:57:34.558224 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 15:57:34 crc kubenswrapper[3552]: I0320 15:57:34.590027 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" event={"ID":"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89","Type":"ContainerStarted","Data":"b0204d9dc02e9ba206b002f4a5a1bdaf95846d30cb3e8d389650a746bc7fa35d"} Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.028424 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-77zr7"] Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.051044 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-rnz7c"] Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.060018 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-77zr7"] Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.070646 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-rnz7c"] Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.481283 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="148d3ae9-f212-412a-b4a3-95c60681e8e3" path="/var/lib/kubelet/pods/148d3ae9-f212-412a-b4a3-95c60681e8e3/volumes" Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.483581 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8f364e0-b915-4bdc-969a-469119a78a2d" path="/var/lib/kubelet/pods/c8f364e0-b915-4bdc-969a-469119a78a2d/volumes" Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.603896 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" event={"ID":"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89","Type":"ContainerStarted","Data":"f2021211a163a0ed7358502b9ad931a842a836fe60e1bba629123c99ba99ef42"} Mar 20 15:57:35 crc kubenswrapper[3552]: I0320 15:57:35.638385 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" podStartSLOduration=2.312014309 podStartE2EDuration="2.638322783s" podCreationTimestamp="2026-03-20 15:57:33 +0000 UTC" firstStartedPulling="2026-03-20 15:57:34.558046373 +0000 UTC m=+1954.251743193" lastFinishedPulling="2026-03-20 15:57:34.884354837 +0000 UTC m=+1954.578051667" observedRunningTime="2026-03-20 15:57:35.634646665 +0000 UTC m=+1955.328343495" watchObservedRunningTime="2026-03-20 15:57:35.638322783 +0000 UTC m=+1955.332019623" Mar 20 15:57:39 crc kubenswrapper[3552]: I0320 15:57:39.430543 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:57:39 crc kubenswrapper[3552]: E0320 15:57:39.431631 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" 
podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:57:54 crc kubenswrapper[3552]: I0320 15:57:54.430548 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:57:54 crc kubenswrapper[3552]: E0320 15:57:54.431852 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:58:01 crc kubenswrapper[3552]: I0320 15:58:01.328504 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:58:01 crc kubenswrapper[3552]: I0320 15:58:01.329184 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:58:01 crc kubenswrapper[3552]: I0320 15:58:01.329222 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:58:01 crc kubenswrapper[3552]: I0320 15:58:01.329251 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:58:01 crc kubenswrapper[3552]: I0320 15:58:01.329314 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:58:07 crc kubenswrapper[3552]: I0320 15:58:07.430802 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:58:07 crc kubenswrapper[3552]: E0320 15:58:07.432068 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 15:58:19 crc kubenswrapper[3552]: I0320 15:58:19.055660 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-mrt5x"] Mar 20 15:58:19 crc kubenswrapper[3552]: I0320 15:58:19.074534 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-mrt5x"] Mar 20 15:58:19 crc kubenswrapper[3552]: I0320 15:58:19.447678 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55759243-c923-4bbc-8693-f0c35e30f6a1" path="/var/lib/kubelet/pods/55759243-c923-4bbc-8693-f0c35e30f6a1/volumes" Mar 20 15:58:22 crc kubenswrapper[3552]: I0320 15:58:22.429879 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 15:58:23 crc kubenswrapper[3552]: I0320 15:58:23.030182 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"f0b09ae54bea2ff083e11bf57e9ad0b3e8c74d921c18b9a3d158e51bbcd5f477"} Mar 20 15:58:24 crc kubenswrapper[3552]: I0320 15:58:24.566880 3552 scope.go:117] "RemoveContainer" 
containerID="935c10c4456ee8e387170ebbb81b8702783462a33b6dfd6c23a114af7a4b915e" Mar 20 15:58:24 crc kubenswrapper[3552]: I0320 15:58:24.628388 3552 scope.go:117] "RemoveContainer" containerID="c75f2060be9c78c13e97a92cd5fce4452ab57bb77e0480c2be041bbe17f45e04" Mar 20 15:58:24 crc kubenswrapper[3552]: I0320 15:58:24.682596 3552 scope.go:117] "RemoveContainer" containerID="b894dcd0bf3e8492a78c9f1d16364c2e1221ed4fd909f679f92ec7792dfa6a8f" Mar 20 15:58:24 crc kubenswrapper[3552]: I0320 15:58:24.735590 3552 scope.go:117] "RemoveContainer" containerID="0d23070d42c5a0c319759363cf63f21a27f12d4aeb0f461bbcac501e2bbed392" Mar 20 15:58:29 crc kubenswrapper[3552]: I0320 15:58:29.033719 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-l664w"] Mar 20 15:58:29 crc kubenswrapper[3552]: I0320 15:58:29.042151 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-l664w"] Mar 20 15:58:29 crc kubenswrapper[3552]: I0320 15:58:29.449684 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dfce561-7659-4b99-8f83-02573b343f5e" path="/var/lib/kubelet/pods/9dfce561-7659-4b99-8f83-02573b343f5e/volumes" Mar 20 15:58:52 crc kubenswrapper[3552]: I0320 15:58:52.052630 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vbvt2"] Mar 20 15:58:52 crc kubenswrapper[3552]: I0320 15:58:52.063392 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vbvt2"] Mar 20 15:58:53 crc kubenswrapper[3552]: I0320 15:58:53.442130 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ae12b08-a678-4f33-8b7c-d23c6aca08fe" path="/var/lib/kubelet/pods/8ae12b08-a678-4f33-8b7c-d23c6aca08fe/volumes" Mar 20 15:58:56 crc kubenswrapper[3552]: I0320 15:58:56.054292 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-z758k"] Mar 20 15:58:56 crc kubenswrapper[3552]: I0320 15:58:56.065548 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-z758k"] Mar 20 15:58:57 crc kubenswrapper[3552]: I0320 15:58:57.442258 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="739e2036-5958-4ee3-9fe3-4734696fdc6a" path="/var/lib/kubelet/pods/739e2036-5958-4ee3-9fe3-4734696fdc6a/volumes" Mar 20 15:58:58 crc kubenswrapper[3552]: I0320 15:58:58.025788 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-dncfs"] Mar 20 15:58:58 crc kubenswrapper[3552]: I0320 15:58:58.035117 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-dncfs"] Mar 20 15:58:59 crc kubenswrapper[3552]: I0320 15:58:59.440907 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="625d76ad-7531-4be1-ab6d-769f15e6a7e5" path="/var/lib/kubelet/pods/625d76ad-7531-4be1-ab6d-769f15e6a7e5/volumes" Mar 20 15:59:01 crc kubenswrapper[3552]: I0320 15:59:01.329910 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 15:59:01 crc kubenswrapper[3552]: I0320 15:59:01.330530 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 15:59:01 crc kubenswrapper[3552]: I0320 15:59:01.330656 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 15:59:01 crc kubenswrapper[3552]: I0320 15:59:01.330760 3552 kubelet_getters.go:187] "Pod status updated" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 15:59:01 crc kubenswrapper[3552]: I0320 15:59:01.330806 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 15:59:16 crc kubenswrapper[3552]: I0320 15:59:16.465836 3552 generic.go:334] "Generic (PLEG): container finished" podID="ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" containerID="f2021211a163a0ed7358502b9ad931a842a836fe60e1bba629123c99ba99ef42" exitCode=0 Mar 20 15:59:16 crc kubenswrapper[3552]: I0320 15:59:16.465928 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" event={"ID":"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89","Type":"ContainerDied","Data":"f2021211a163a0ed7358502b9ad931a842a836fe60e1bba629123c99ba99ef42"} Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.782430 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.853944 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-ssh-key-openstack-edpm-ipam\") pod \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.854166 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ckst\" (UniqueName: \"kubernetes.io/projected/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-kube-api-access-5ckst\") pod \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.854247 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-inventory\") pod \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\" (UID: \"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89\") " Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.859352 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-kube-api-access-5ckst" (OuterVolumeSpecName: "kube-api-access-5ckst") pod "ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" (UID: "ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89"). InnerVolumeSpecName "kube-api-access-5ckst". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.880280 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-inventory" (OuterVolumeSpecName: "inventory") pod "ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" (UID: "ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.882321 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" (UID: "ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.957139 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.957188 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-5ckst\" (UniqueName: \"kubernetes.io/projected/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-kube-api-access-5ckst\") on node \"crc\" DevicePath \"\"" Mar 20 15:59:17 crc kubenswrapper[3552]: I0320 15:59:17.957212 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.482938 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" event={"ID":"ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89","Type":"ContainerDied","Data":"b0204d9dc02e9ba206b002f4a5a1bdaf95846d30cb3e8d389650a746bc7fa35d"} Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.483260 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0204d9dc02e9ba206b002f4a5a1bdaf95846d30cb3e8d389650a746bc7fa35d" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.483002 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.595977 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn"] Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.596233 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7f951458-ca42-4880-b3f2-faefb8cdbb2a" podNamespace="openstack" podName="configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: E0320 15:59:18.596741 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.596772 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.597161 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.598342 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.604174 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.604482 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.604898 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.605708 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.609548 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn"] Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.672115 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.672177 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.672281 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngjlb\" (UniqueName: \"kubernetes.io/projected/7f951458-ca42-4880-b3f2-faefb8cdbb2a-kube-api-access-ngjlb\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.774331 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.774441 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.774593 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-ngjlb\" (UniqueName: 
\"kubernetes.io/projected/7f951458-ca42-4880-b3f2-faefb8cdbb2a-kube-api-access-ngjlb\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.779627 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.781640 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.790960 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngjlb\" (UniqueName: \"kubernetes.io/projected/7f951458-ca42-4880-b3f2-faefb8cdbb2a-kube-api-access-ngjlb\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:18 crc kubenswrapper[3552]: I0320 15:59:18.943676 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 15:59:19 crc kubenswrapper[3552]: I0320 15:59:19.508690 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn"] Mar 20 15:59:20 crc kubenswrapper[3552]: I0320 15:59:20.510024 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" event={"ID":"7f951458-ca42-4880-b3f2-faefb8cdbb2a","Type":"ContainerStarted","Data":"3a67a5d8487c12af3220ca5ba721d9eac12a3a477daff207a0b2eec4f8583a51"} Mar 20 15:59:20 crc kubenswrapper[3552]: I0320 15:59:20.510432 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" event={"ID":"7f951458-ca42-4880-b3f2-faefb8cdbb2a","Type":"ContainerStarted","Data":"1798f69c9a3a76a67f9d12aa3f58f7dfd7a895af7a01a0ade6bfd54f4263c6ea"} Mar 20 15:59:20 crc kubenswrapper[3552]: I0320 15:59:20.534054 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" podStartSLOduration=2.218143769 podStartE2EDuration="2.534010796s" podCreationTimestamp="2026-03-20 15:59:18 +0000 UTC" firstStartedPulling="2026-03-20 15:59:19.521955554 +0000 UTC m=+2059.215652384" lastFinishedPulling="2026-03-20 15:59:19.837822581 +0000 UTC m=+2059.531519411" observedRunningTime="2026-03-20 15:59:20.525802268 +0000 UTC m=+2060.219499098" watchObservedRunningTime="2026-03-20 15:59:20.534010796 +0000 UTC m=+2060.227707616" Mar 20 15:59:24 crc kubenswrapper[3552]: I0320 15:59:24.852296 3552 scope.go:117] "RemoveContainer" 
containerID="60fd76bb1637a9197587ef589c085e368feed4145a6273d9e562f9fcd4d7a941" Mar 20 15:59:24 crc kubenswrapper[3552]: I0320 15:59:24.913010 3552 scope.go:117] "RemoveContainer" containerID="ac2a876e5126e33f5c59e7aff8f8769bc2d52e13ea353807ecd7442604348263" Mar 20 15:59:24 crc kubenswrapper[3552]: I0320 15:59:24.945926 3552 scope.go:117] "RemoveContainer" containerID="8ae763f3fbcc142bbe20ff962a7dae27043bae90a1b8f2a09bb3e5af65f18c80" Mar 20 15:59:25 crc kubenswrapper[3552]: I0320 15:59:25.013733 3552 scope.go:117] "RemoveContainer" containerID="593e5c4d98df339765f31ca5e81de748b1d378067e8cb0190f51b4c70ac56db9" Mar 20 15:59:25 crc kubenswrapper[3552]: I0320 15:59:25.086389 3552 scope.go:117] "RemoveContainer" containerID="f8494ea855028bd40e206f89694c62395409d373aa19726fdd6e6f3ee407ab65" Mar 20 15:59:25 crc kubenswrapper[3552]: I0320 15:59:25.142718 3552 scope.go:117] "RemoveContainer" containerID="fc7ef613e5c54c79c76c860042bbb85c17232e2259df2fe36b717b5f8c33eef7" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.309235 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pqkjk"] Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.310188 3552 topology_manager.go:215] "Topology Admit Handler" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" podNamespace="openshift-marketplace" podName="certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.312551 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.341045 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pqkjk"] Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.386324 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-catalog-content\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.386436 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5bwn\" (UniqueName: \"kubernetes.io/projected/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-kube-api-access-m5bwn\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.386535 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-utilities\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.488533 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-catalog-content\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.488582 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume 
\"kube-api-access-m5bwn\" (UniqueName: \"kubernetes.io/projected/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-kube-api-access-m5bwn\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.488647 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-utilities\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.490972 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-catalog-content\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.491023 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-utilities\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.508111 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5bwn\" (UniqueName: \"kubernetes.io/projected/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-kube-api-access-m5bwn\") pod \"certified-operators-pqkjk\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:47 crc kubenswrapper[3552]: I0320 15:59:47.651079 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 15:59:48 crc kubenswrapper[3552]: I0320 15:59:48.087998 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pqkjk"] Mar 20 15:59:48 crc kubenswrapper[3552]: I0320 15:59:48.758756 3552 generic.go:334] "Generic (PLEG): container finished" podID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerID="67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b" exitCode=0 Mar 20 15:59:48 crc kubenswrapper[3552]: I0320 15:59:48.758832 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerDied","Data":"67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b"} Mar 20 15:59:48 crc kubenswrapper[3552]: I0320 15:59:48.758911 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerStarted","Data":"a99a30b1f25489fe0bfe5868c57604ad41bba09bdd0c3a8a56d37127ae1f8292"} Mar 20 15:59:49 crc kubenswrapper[3552]: I0320 15:59:49.766299 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerStarted","Data":"4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c"} Mar 20 15:59:56 crc kubenswrapper[3552]: I0320 15:59:56.826873 3552 generic.go:334] "Generic (PLEG): container finished" podID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerID="4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c" exitCode=0 Mar 20 15:59:56 crc kubenswrapper[3552]: I0320 15:59:56.826937 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerDied","Data":"4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c"} Mar 20 15:59:58 crc kubenswrapper[3552]: I0320 15:59:58.850128 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerStarted","Data":"d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23"} Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.143242 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pqkjk" podStartSLOduration=4.782042858 podStartE2EDuration="13.143189248s" podCreationTimestamp="2026-03-20 15:59:47 +0000 UTC" firstStartedPulling="2026-03-20 15:59:48.760998472 +0000 UTC m=+2088.454695302" lastFinishedPulling="2026-03-20 15:59:57.122144852 +0000 UTC m=+2096.815841692" observedRunningTime="2026-03-20 15:59:58.875493258 +0000 UTC m=+2098.569190148" watchObservedRunningTime="2026-03-20 16:00:00.143189248 +0000 UTC m=+2099.836886078" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.149441 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4"] Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.149647 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.151103 3552 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.154429 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.154471 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.160210 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4"] Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.248074 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-secret-volume\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.248175 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-config-volume\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.248413 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58f8t\" (UniqueName: \"kubernetes.io/projected/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-kube-api-access-58f8t\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.350279 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-config-volume\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.350376 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-58f8t\" (UniqueName: \"kubernetes.io/projected/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-kube-api-access-58f8t\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.350546 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-secret-volume\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.351325 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-config-volume\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.357243 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-secret-volume\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.365751 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-58f8t\" (UniqueName: \"kubernetes.io/projected/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-kube-api-access-58f8t\") pod \"collect-profiles-29567040-27kr4\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.476389 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:00 crc kubenswrapper[3552]: I0320 16:00:00.943661 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4"] Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.337745 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.338233 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.338388 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.338617 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.338723 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.881837 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" event={"ID":"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2","Type":"ContainerStarted","Data":"c857bc69d61890ea5276f45fac8c4684e45fa41a051ffecccfced34dd846e99a"} Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.882156 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" event={"ID":"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2","Type":"ContainerStarted","Data":"c9fb0a0f984784317de1087b05285cdd809a7e6f90dcd3b25f97d4570338e145"} Mar 20 16:00:01 crc kubenswrapper[3552]: I0320 16:00:01.907048 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" podStartSLOduration=1.907006992 podStartE2EDuration="1.907006992s" podCreationTimestamp="2026-03-20 16:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-03-20 16:00:01.905688687 +0000 UTC m=+2101.599385537" watchObservedRunningTime="2026-03-20 16:00:01.907006992 +0000 UTC m=+2101.600703822" Mar 20 16:00:02 crc kubenswrapper[3552]: I0320 16:00:02.893267 3552 generic.go:334] "Generic (PLEG): container finished" podID="4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" containerID="c857bc69d61890ea5276f45fac8c4684e45fa41a051ffecccfced34dd846e99a" exitCode=0 Mar 20 16:00:02 crc kubenswrapper[3552]: I0320 16:00:02.893323 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" event={"ID":"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2","Type":"ContainerDied","Data":"c857bc69d61890ea5276f45fac8c4684e45fa41a051ffecccfced34dd846e99a"} Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.140283 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.227718 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58f8t\" (UniqueName: \"kubernetes.io/projected/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-kube-api-access-58f8t\") pod \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.227868 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-secret-volume\") pod \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.228043 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-config-volume\") pod \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\" (UID: \"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2\") " Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.229645 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-config-volume" (OuterVolumeSpecName: "config-volume") pod "4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" (UID: "4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.234933 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" (UID: "4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.235065 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-kube-api-access-58f8t" (OuterVolumeSpecName: "kube-api-access-58f8t") pod "4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" (UID: "4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2"). InnerVolumeSpecName "kube-api-access-58f8t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.331688 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-secret-volume\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.331758 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-config-volume\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.331816 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-58f8t\" (UniqueName: \"kubernetes.io/projected/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2-kube-api-access-58f8t\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.910985 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" event={"ID":"4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2","Type":"ContainerDied","Data":"c9fb0a0f984784317de1087b05285cdd809a7e6f90dcd3b25f97d4570338e145"} Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.911395 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9fb0a0f984784317de1087b05285cdd809a7e6f90dcd3b25f97d4570338e145" Mar 20 16:00:04 crc kubenswrapper[3552]: I0320 16:00:04.911079 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4" Mar 20 16:00:05 crc kubenswrapper[3552]: I0320 16:00:05.044477 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"] Mar 20 16:00:05 crc kubenswrapper[3552]: I0320 16:00:05.062152 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29566995-vzn2t"] Mar 20 16:00:05 crc kubenswrapper[3552]: E0320 16:00:05.091040 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d5a577d_5e87_40ef_b5c2_a3303ec4a2b2.slice/crio-c9fb0a0f984784317de1087b05285cdd809a7e6f90dcd3b25f97d4570338e145\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d5a577d_5e87_40ef_b5c2_a3303ec4a2b2.slice\": RecentStats: unable to find data in memory cache]" Mar 20 16:00:05 crc kubenswrapper[3552]: I0320 16:00:05.447197 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23dd33fe-d710-4638-9e0e-72cb27cb3e84" path="/var/lib/kubelet/pods/23dd33fe-d710-4638-9e0e-72cb27cb3e84/volumes" Mar 20 16:00:07 crc kubenswrapper[3552]: I0320 16:00:07.651804 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 16:00:07 crc kubenswrapper[3552]: I0320 16:00:07.652463 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 16:00:07 crc kubenswrapper[3552]: I0320 16:00:07.769736 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 16:00:08 crc kubenswrapper[3552]: I0320 16:00:08.036333 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 16:00:08 crc kubenswrapper[3552]: I0320 16:00:08.089194 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pqkjk"] Mar 20 16:00:09 crc kubenswrapper[3552]: I0320 16:00:09.969831 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pqkjk" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="registry-server" containerID="cri-o://d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23" gracePeriod=2 Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.293770 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.457021 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-catalog-content\") pod \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.460658 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5bwn\" (UniqueName: \"kubernetes.io/projected/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-kube-api-access-m5bwn\") pod \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.460851 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-utilities\") pod \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\" (UID: \"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2\") " Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.461765 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-utilities" (OuterVolumeSpecName: "utilities") pod "e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" (UID: "e44d0e21-8b6f-4ec9-a165-f00ec358d5e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.465824 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-kube-api-access-m5bwn" (OuterVolumeSpecName: "kube-api-access-m5bwn") pod "e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" (UID: "e44d0e21-8b6f-4ec9-a165-f00ec358d5e2"). InnerVolumeSpecName "kube-api-access-m5bwn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.562660 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.562695 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-m5bwn\" (UniqueName: \"kubernetes.io/projected/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-kube-api-access-m5bwn\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.729536 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" (UID: "e44d0e21-8b6f-4ec9-a165-f00ec358d5e2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.764825 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.985444 3552 generic.go:334] "Generic (PLEG): container finished" podID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerID="d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23" exitCode=0 Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.985500 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerDied","Data":"d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23"} Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.985506 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pqkjk" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.986787 3552 scope.go:117] "RemoveContainer" containerID="d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23" Mar 20 16:00:10 crc kubenswrapper[3552]: I0320 16:00:10.986751 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqkjk" event={"ID":"e44d0e21-8b6f-4ec9-a165-f00ec358d5e2","Type":"ContainerDied","Data":"a99a30b1f25489fe0bfe5868c57604ad41bba09bdd0c3a8a56d37127ae1f8292"} Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.046240 3552 scope.go:117] "RemoveContainer" containerID="4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.051509 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pqkjk"] Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.068066 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pqkjk"] Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.094335 3552 scope.go:117] "RemoveContainer" containerID="67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.139706 3552 scope.go:117] "RemoveContainer" containerID="d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23" Mar 20 16:00:11 crc kubenswrapper[3552]: E0320 16:00:11.140262 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23\": container with ID starting with d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23 not found: ID does not exist" containerID="d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.140314 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23"} err="failed to get container status \"d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23\": rpc error: code = NotFound desc = could not find container \"d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23\": container with ID starting with d5f39854a58a184674ad9b12776d1163176a181a8246bfcffda968dc15649c23 not found: ID does not exist" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.140327 3552 scope.go:117] "RemoveContainer" containerID="4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c" Mar 20 16:00:11 crc kubenswrapper[3552]: E0320 16:00:11.141081 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c\": container with ID starting with 4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c not found: ID does not exist" containerID="4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.141112 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c"} err="failed to get container status \"4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c\": rpc error: code = NotFound 
desc = could not find container \"4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c\": container with ID starting with 4df0dfcfa5b16083ab6ae6ec47b78cab541af84b1fa586b7a96d4eb342c3919c not found: ID does not exist" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.141126 3552 scope.go:117] "RemoveContainer" containerID="67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b" Mar 20 16:00:11 crc kubenswrapper[3552]: E0320 16:00:11.141478 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b\": container with ID starting with 67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b not found: ID does not exist" containerID="67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.141514 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b"} err="failed to get container status \"67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b\": rpc error: code = NotFound desc = could not find container \"67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b\": container with ID starting with 67c233ea56b710f5711a69f8e67bf3358c56544dbb3af1a4cca381b48c1c133b not found: ID does not exist" Mar 20 16:00:11 crc kubenswrapper[3552]: I0320 16:00:11.444606 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" path="/var/lib/kubelet/pods/e44d0e21-8b6f-4ec9-a165-f00ec358d5e2/volumes" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.650564 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6gsdq"] Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.651310 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" podNamespace="openshift-marketplace" podName="community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: E0320 16:00:18.654427 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="extract-utilities" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.654464 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="extract-utilities" Mar 20 16:00:18 crc kubenswrapper[3552]: E0320 16:00:18.654480 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="extract-content" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.654490 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="extract-content" Mar 20 16:00:18 crc kubenswrapper[3552]: E0320 16:00:18.654514 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" containerName="collect-profiles" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.654523 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" containerName="collect-profiles" Mar 20 16:00:18 crc kubenswrapper[3552]: E0320 16:00:18.654556 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="registry-server" Mar 20 16:00:18 
crc kubenswrapper[3552]: I0320 16:00:18.654565 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="registry-server" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.654918 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" containerName="collect-profiles" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.654940 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="e44d0e21-8b6f-4ec9-a165-f00ec358d5e2" containerName="registry-server" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.656641 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.664497 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gsdq"] Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.749676 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhpgj\" (UniqueName: \"kubernetes.io/projected/b552217d-6c3c-4484-bf59-ec22adaa1077-kube-api-access-hhpgj\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.749803 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-utilities\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.749984 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-catalog-content\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.851681 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-utilities\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.851811 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-catalog-content\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.851843 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hhpgj\" (UniqueName: \"kubernetes.io/projected/b552217d-6c3c-4484-bf59-ec22adaa1077-kube-api-access-hhpgj\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.852548 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-utilities\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.852787 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-catalog-content\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.872242 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhpgj\" (UniqueName: \"kubernetes.io/projected/b552217d-6c3c-4484-bf59-ec22adaa1077-kube-api-access-hhpgj\") pod \"community-operators-6gsdq\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:18 crc kubenswrapper[3552]: I0320 16:00:18.980715 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:19 crc kubenswrapper[3552]: I0320 16:00:19.487865 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gsdq"] Mar 20 16:00:20 crc kubenswrapper[3552]: I0320 16:00:20.062703 3552 generic.go:334] "Generic (PLEG): container finished" podID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerID="ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251" exitCode=0 Mar 20 16:00:20 crc kubenswrapper[3552]: I0320 16:00:20.062763 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerDied","Data":"ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251"} Mar 20 16:00:20 crc kubenswrapper[3552]: I0320 16:00:20.062788 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerStarted","Data":"4c78fe2fa72163f2e593aa6eeb798f214d5b99759cdc00bf4f681681c20d095b"} Mar 20 16:00:21 crc kubenswrapper[3552]: I0320 16:00:21.071061 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerStarted","Data":"6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2"} Mar 20 16:00:23 crc kubenswrapper[3552]: I0320 16:00:23.093129 3552 generic.go:334] "Generic (PLEG): container finished" podID="7f951458-ca42-4880-b3f2-faefb8cdbb2a" containerID="3a67a5d8487c12af3220ca5ba721d9eac12a3a477daff207a0b2eec4f8583a51" exitCode=0 Mar 20 16:00:23 crc kubenswrapper[3552]: I0320 16:00:23.093230 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" event={"ID":"7f951458-ca42-4880-b3f2-faefb8cdbb2a","Type":"ContainerDied","Data":"3a67a5d8487c12af3220ca5ba721d9eac12a3a477daff207a0b2eec4f8583a51"} Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.554085 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.661147 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-inventory\") pod \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.661303 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngjlb\" (UniqueName: \"kubernetes.io/projected/7f951458-ca42-4880-b3f2-faefb8cdbb2a-kube-api-access-ngjlb\") pod \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.661370 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-ssh-key-openstack-edpm-ipam\") pod \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\" (UID: \"7f951458-ca42-4880-b3f2-faefb8cdbb2a\") " Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.667165 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f951458-ca42-4880-b3f2-faefb8cdbb2a-kube-api-access-ngjlb" (OuterVolumeSpecName: "kube-api-access-ngjlb") pod "7f951458-ca42-4880-b3f2-faefb8cdbb2a" (UID: "7f951458-ca42-4880-b3f2-faefb8cdbb2a"). InnerVolumeSpecName "kube-api-access-ngjlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.692538 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7f951458-ca42-4880-b3f2-faefb8cdbb2a" (UID: "7f951458-ca42-4880-b3f2-faefb8cdbb2a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.693117 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-inventory" (OuterVolumeSpecName: "inventory") pod "7f951458-ca42-4880-b3f2-faefb8cdbb2a" (UID: "7f951458-ca42-4880-b3f2-faefb8cdbb2a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.763771 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-ngjlb\" (UniqueName: \"kubernetes.io/projected/7f951458-ca42-4880-b3f2-faefb8cdbb2a-kube-api-access-ngjlb\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.763809 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:24 crc kubenswrapper[3552]: I0320 16:00:24.763823 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f951458-ca42-4880-b3f2-faefb8cdbb2a-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.068218 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-61e5-account-create-update-k5rvg"] Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.077841 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-61e5-account-create-update-k5rvg"] Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.110969 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" event={"ID":"7f951458-ca42-4880-b3f2-faefb8cdbb2a","Type":"ContainerDied","Data":"1798f69c9a3a76a67f9d12aa3f58f7dfd7a895af7a01a0ade6bfd54f4263c6ea"} Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.111005 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1798f69c9a3a76a67f9d12aa3f58f7dfd7a895af7a01a0ade6bfd54f4263c6ea" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.111044 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.223804 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6"] Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.224486 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6f4150fa-2847-4186-91f2-144e7e53ffae" podNamespace="openstack" podName="validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: E0320 16:00:25.224881 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7f951458-ca42-4880-b3f2-faefb8cdbb2a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.224917 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f951458-ca42-4880-b3f2-faefb8cdbb2a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.225186 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f951458-ca42-4880-b3f2-faefb8cdbb2a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.226053 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.228200 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.228352 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.229267 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.229309 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.233883 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6"] Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.261175 3552 scope.go:117] "RemoveContainer" containerID="87f51db12651656002390720995a7715c0ad623eb8ffece9221a44d3609130bf" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.375603 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.375737 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.375775 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92z55\" (UniqueName: \"kubernetes.io/projected/6f4150fa-2847-4186-91f2-144e7e53ffae-kube-api-access-92z55\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.447992 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82b3f1de-b3b7-4401-bb4a-5f3d3a009041" path="/var/lib/kubelet/pods/82b3f1de-b3b7-4401-bb4a-5f3d3a009041/volumes" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.480888 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.480959 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-92z55\" (UniqueName: 
\"kubernetes.io/projected/6f4150fa-2847-4186-91f2-144e7e53ffae-kube-api-access-92z55\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.481039 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.486415 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.486959 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.511108 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-92z55\" (UniqueName: \"kubernetes.io/projected/6f4150fa-2847-4186-91f2-144e7e53ffae-kube-api-access-92z55\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:25 crc kubenswrapper[3552]: I0320 16:00:25.548194 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.037723 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-lbvs5"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.055581 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-f5d68"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.117488 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-adfe-account-create-update-7t7fl"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.125746 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-zvnmw"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.133605 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-aae5-account-create-update-xncvz"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.141241 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-lbvs5"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.150170 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-f5d68"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.157804 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-zvnmw"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.164974 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-aae5-account-create-update-xncvz"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.177057 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-adfe-account-create-update-7t7fl"] Mar 20 16:00:26 crc kubenswrapper[3552]: I0320 16:00:26.183533 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6"] Mar 20 16:00:27 crc kubenswrapper[3552]: I0320 16:00:27.130385 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" event={"ID":"6f4150fa-2847-4186-91f2-144e7e53ffae","Type":"ContainerStarted","Data":"e498a5a12368f08b8c24d0cebc4b33ae080311ede1e81a1af6a93536906669f1"} Mar 20 16:00:27 crc kubenswrapper[3552]: I0320 16:00:27.443343 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="084c549c-51f1-4d1d-92bf-1e18655e07f8" path="/var/lib/kubelet/pods/084c549c-51f1-4d1d-92bf-1e18655e07f8/volumes" Mar 20 16:00:27 crc kubenswrapper[3552]: I0320 16:00:27.444155 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4103ddf5-a1fa-417e-8948-6f54835b9ff2" path="/var/lib/kubelet/pods/4103ddf5-a1fa-417e-8948-6f54835b9ff2/volumes" Mar 20 16:00:27 crc kubenswrapper[3552]: I0320 16:00:27.444989 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bc966f8-44aa-4dd7-bbd6-4ce4edb35211" path="/var/lib/kubelet/pods/6bc966f8-44aa-4dd7-bbd6-4ce4edb35211/volumes" Mar 20 16:00:27 crc kubenswrapper[3552]: I0320 16:00:27.461243 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a1a135a-ae66-4349-80ed-bef49b8632e4" path="/var/lib/kubelet/pods/8a1a135a-ae66-4349-80ed-bef49b8632e4/volumes" Mar 20 16:00:27 crc kubenswrapper[3552]: I0320 16:00:27.462003 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6543e96-134c-4d66-8410-a2f517b9d896" 
path="/var/lib/kubelet/pods/b6543e96-134c-4d66-8410-a2f517b9d896/volumes" Mar 20 16:00:28 crc kubenswrapper[3552]: I0320 16:00:28.140479 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" event={"ID":"6f4150fa-2847-4186-91f2-144e7e53ffae","Type":"ContainerStarted","Data":"b192dde32862bb3d0d7fb6779488365f55b11e764b31f12b8c4c989e37ca90c0"} Mar 20 16:00:28 crc kubenswrapper[3552]: I0320 16:00:28.165489 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" podStartSLOduration=2.72593899 podStartE2EDuration="3.165430819s" podCreationTimestamp="2026-03-20 16:00:25 +0000 UTC" firstStartedPulling="2026-03-20 16:00:26.177068184 +0000 UTC m=+2125.870765014" lastFinishedPulling="2026-03-20 16:00:26.616559993 +0000 UTC m=+2126.310256843" observedRunningTime="2026-03-20 16:00:28.155845375 +0000 UTC m=+2127.849542215" watchObservedRunningTime="2026-03-20 16:00:28.165430819 +0000 UTC m=+2127.859127659" Mar 20 16:00:32 crc kubenswrapper[3552]: I0320 16:00:32.180599 3552 generic.go:334] "Generic (PLEG): container finished" podID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerID="6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2" exitCode=0 Mar 20 16:00:32 crc kubenswrapper[3552]: I0320 16:00:32.180689 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerDied","Data":"6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2"} Mar 20 16:00:32 crc kubenswrapper[3552]: I0320 16:00:32.188156 3552 generic.go:334] "Generic (PLEG): container finished" podID="6f4150fa-2847-4186-91f2-144e7e53ffae" containerID="b192dde32862bb3d0d7fb6779488365f55b11e764b31f12b8c4c989e37ca90c0" exitCode=0 Mar 20 16:00:32 crc kubenswrapper[3552]: I0320 16:00:32.188389 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" event={"ID":"6f4150fa-2847-4186-91f2-144e7e53ffae","Type":"ContainerDied","Data":"b192dde32862bb3d0d7fb6779488365f55b11e764b31f12b8c4c989e37ca90c0"} Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.567677 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.655120 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92z55\" (UniqueName: \"kubernetes.io/projected/6f4150fa-2847-4186-91f2-144e7e53ffae-kube-api-access-92z55\") pod \"6f4150fa-2847-4186-91f2-144e7e53ffae\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.655210 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-ssh-key-openstack-edpm-ipam\") pod \"6f4150fa-2847-4186-91f2-144e7e53ffae\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.655430 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-inventory\") pod \"6f4150fa-2847-4186-91f2-144e7e53ffae\" (UID: \"6f4150fa-2847-4186-91f2-144e7e53ffae\") " Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.660949 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f4150fa-2847-4186-91f2-144e7e53ffae-kube-api-access-92z55" (OuterVolumeSpecName: "kube-api-access-92z55") pod "6f4150fa-2847-4186-91f2-144e7e53ffae" (UID: "6f4150fa-2847-4186-91f2-144e7e53ffae"). InnerVolumeSpecName "kube-api-access-92z55". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.682383 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-inventory" (OuterVolumeSpecName: "inventory") pod "6f4150fa-2847-4186-91f2-144e7e53ffae" (UID: "6f4150fa-2847-4186-91f2-144e7e53ffae"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.698538 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6f4150fa-2847-4186-91f2-144e7e53ffae" (UID: "6f4150fa-2847-4186-91f2-144e7e53ffae"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.757386 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.757444 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-92z55\" (UniqueName: \"kubernetes.io/projected/6f4150fa-2847-4186-91f2-144e7e53ffae-kube-api-access-92z55\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:33 crc kubenswrapper[3552]: I0320 16:00:33.757458 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6f4150fa-2847-4186-91f2-144e7e53ffae-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.212565 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerStarted","Data":"f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538"} Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.217200 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" event={"ID":"6f4150fa-2847-4186-91f2-144e7e53ffae","Type":"ContainerDied","Data":"e498a5a12368f08b8c24d0cebc4b33ae080311ede1e81a1af6a93536906669f1"} Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.217248 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.217275 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e498a5a12368f08b8c24d0cebc4b33ae080311ede1e81a1af6a93536906669f1" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.260331 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6gsdq" podStartSLOduration=3.860089988 podStartE2EDuration="16.260293764s" podCreationTimestamp="2026-03-20 16:00:18 +0000 UTC" firstStartedPulling="2026-03-20 16:00:20.06450732 +0000 UTC m=+2119.758204150" lastFinishedPulling="2026-03-20 16:00:32.464711096 +0000 UTC m=+2132.158407926" observedRunningTime="2026-03-20 16:00:34.256128344 +0000 UTC m=+2133.949825204" watchObservedRunningTime="2026-03-20 16:00:34.260293764 +0000 UTC m=+2133.953990594" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.341810 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc"] Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.341989 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" podNamespace="openstack" podName="install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: E0320 16:00:34.342267 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6f4150fa-2847-4186-91f2-144e7e53ffae" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.342284 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f4150fa-2847-4186-91f2-144e7e53ffae" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Mar 20 16:00:34 crc 
kubenswrapper[3552]: I0320 16:00:34.342669 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f4150fa-2847-4186-91f2-144e7e53ffae" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.343458 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.346886 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.346990 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.347222 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.347458 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.350723 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc"] Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.474449 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.474797 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4868\" (UniqueName: \"kubernetes.io/projected/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-kube-api-access-c4868\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.474827 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.576747 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.576871 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-c4868\" (UniqueName: \"kubernetes.io/projected/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-kube-api-access-c4868\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: 
\"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.576899 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.582152 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.582166 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.592667 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4868\" (UniqueName: \"kubernetes.io/projected/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-kube-api-access-c4868\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-77jvc\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:34 crc kubenswrapper[3552]: I0320 16:00:34.672311 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:00:35 crc kubenswrapper[3552]: I0320 16:00:35.253228 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc"] Mar 20 16:00:36 crc kubenswrapper[3552]: I0320 16:00:36.232419 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" event={"ID":"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d","Type":"ContainerStarted","Data":"5d905fa8d2a7a7e8fddda37e034287f99b0df2d9dd3f46f43e7f5f38973d9302"} Mar 20 16:00:37 crc kubenswrapper[3552]: I0320 16:00:37.241468 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" event={"ID":"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d","Type":"ContainerStarted","Data":"321e75aa4eece819cc7334be1d8f0f45cbc342f7050491502a8e219f51327ea0"} Mar 20 16:00:37 crc kubenswrapper[3552]: I0320 16:00:37.268227 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" podStartSLOduration=2.805476765 podStartE2EDuration="3.26812094s" podCreationTimestamp="2026-03-20 16:00:34 +0000 UTC" firstStartedPulling="2026-03-20 16:00:35.257588794 +0000 UTC m=+2134.951285624" lastFinishedPulling="2026-03-20 16:00:35.720232969 +0000 UTC m=+2135.413929799" observedRunningTime="2026-03-20 16:00:37.262248324 +0000 UTC m=+2136.955945164" watchObservedRunningTime="2026-03-20 16:00:37.26812094 +0000 UTC m=+2136.961817810" Mar 20 16:00:38 crc kubenswrapper[3552]: I0320 16:00:38.981129 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:38 crc kubenswrapper[3552]: I0320 16:00:38.981441 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:40 crc kubenswrapper[3552]: I0320 16:00:40.069334 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-6gsdq" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="registry-server" probeResult="failure" output=< Mar 20 16:00:40 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:00:40 crc kubenswrapper[3552]: > Mar 20 16:00:42 crc kubenswrapper[3552]: I0320 16:00:42.778851 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:00:42 crc kubenswrapper[3552]: I0320 16:00:42.779231 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:00:49 crc kubenswrapper[3552]: I0320 16:00:49.091596 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:49 crc kubenswrapper[3552]: I0320 16:00:49.192141 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:49 crc 
kubenswrapper[3552]: I0320 16:00:49.249842 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gsdq"] Mar 20 16:00:50 crc kubenswrapper[3552]: I0320 16:00:50.984855 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6gsdq" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="registry-server" containerID="cri-o://f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538" gracePeriod=2 Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.497020 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.628503 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-utilities\") pod \"b552217d-6c3c-4484-bf59-ec22adaa1077\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.628676 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhpgj\" (UniqueName: \"kubernetes.io/projected/b552217d-6c3c-4484-bf59-ec22adaa1077-kube-api-access-hhpgj\") pod \"b552217d-6c3c-4484-bf59-ec22adaa1077\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.628821 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-catalog-content\") pod \"b552217d-6c3c-4484-bf59-ec22adaa1077\" (UID: \"b552217d-6c3c-4484-bf59-ec22adaa1077\") " Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.629924 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-utilities" (OuterVolumeSpecName: "utilities") pod "b552217d-6c3c-4484-bf59-ec22adaa1077" (UID: "b552217d-6c3c-4484-bf59-ec22adaa1077"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.643669 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b552217d-6c3c-4484-bf59-ec22adaa1077-kube-api-access-hhpgj" (OuterVolumeSpecName: "kube-api-access-hhpgj") pod "b552217d-6c3c-4484-bf59-ec22adaa1077" (UID: "b552217d-6c3c-4484-bf59-ec22adaa1077"). InnerVolumeSpecName "kube-api-access-hhpgj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.732449 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:51 crc kubenswrapper[3552]: I0320 16:00:51.732525 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hhpgj\" (UniqueName: \"kubernetes.io/projected/b552217d-6c3c-4484-bf59-ec22adaa1077-kube-api-access-hhpgj\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.001276 3552 generic.go:334] "Generic (PLEG): container finished" podID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerID="f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538" exitCode=0 Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.001334 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerDied","Data":"f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538"} Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.001347 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gsdq" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.001372 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gsdq" event={"ID":"b552217d-6c3c-4484-bf59-ec22adaa1077","Type":"ContainerDied","Data":"4c78fe2fa72163f2e593aa6eeb798f214d5b99759cdc00bf4f681681c20d095b"} Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.001390 3552 scope.go:117] "RemoveContainer" containerID="f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.052344 3552 scope.go:117] "RemoveContainer" containerID="6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.125197 3552 scope.go:117] "RemoveContainer" containerID="ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.160846 3552 scope.go:117] "RemoveContainer" containerID="f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538" Mar 20 16:00:52 crc kubenswrapper[3552]: E0320 16:00:52.161388 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538\": container with ID starting with f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538 not found: ID does not exist" containerID="f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.161472 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538"} err="failed to get container status \"f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538\": rpc error: code = NotFound desc = could not find container \"f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538\": container with ID starting with f6e37cb917abfcd72f8ac389796594a0503004efbf6da133d8ad47e248665538 not found: ID does not exist" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.161490 3552 
scope.go:117] "RemoveContainer" containerID="6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2" Mar 20 16:00:52 crc kubenswrapper[3552]: E0320 16:00:52.161838 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2\": container with ID starting with 6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2 not found: ID does not exist" containerID="6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.161875 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2"} err="failed to get container status \"6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2\": rpc error: code = NotFound desc = could not find container \"6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2\": container with ID starting with 6114f7cb427bf1e842021f1b25388da4d24e7a5558dde45e684103fced7272e2 not found: ID does not exist" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.161888 3552 scope.go:117] "RemoveContainer" containerID="ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251" Mar 20 16:00:52 crc kubenswrapper[3552]: E0320 16:00:52.162172 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251\": container with ID starting with ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251 not found: ID does not exist" containerID="ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.162207 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251"} err="failed to get container status \"ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251\": rpc error: code = NotFound desc = could not find container \"ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251\": container with ID starting with ef26acd3aa47e8016637e2e9746dd9ec47871a1ecc63119d90f6e6dc88a3d251 not found: ID does not exist" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.253875 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b552217d-6c3c-4484-bf59-ec22adaa1077" (UID: "b552217d-6c3c-4484-bf59-ec22adaa1077"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.335239 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gsdq"] Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.343895 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6gsdq"] Mar 20 16:00:52 crc kubenswrapper[3552]: I0320 16:00:52.345598 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b552217d-6c3c-4484-bf59-ec22adaa1077-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:00:53 crc kubenswrapper[3552]: I0320 16:00:53.061393 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5sbpr"] Mar 20 16:00:53 crc kubenswrapper[3552]: I0320 16:00:53.071444 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5sbpr"] Mar 20 16:00:53 crc kubenswrapper[3552]: I0320 16:00:53.445330 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23c8de82-3582-4efe-9b18-b448fd4c8776" path="/var/lib/kubelet/pods/23c8de82-3582-4efe-9b18-b448fd4c8776/volumes" Mar 20 16:00:53 crc kubenswrapper[3552]: I0320 16:00:53.446095 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" path="/var/lib/kubelet/pods/b552217d-6c3c-4484-bf59-ec22adaa1077/volumes" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.158130 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29567041-xdqml"] Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.158777 3552 topology_manager.go:215] "Topology Admit Handler" podUID="176300c2-c75c-415a-8a97-8fc1227581cb" podNamespace="openstack" podName="keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: E0320 16:01:00.159111 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="extract-content" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.159125 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="extract-content" Mar 20 16:01:00 crc kubenswrapper[3552]: E0320 16:01:00.159167 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="extract-utilities" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.159176 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="extract-utilities" Mar 20 16:01:00 crc kubenswrapper[3552]: E0320 16:01:00.159193 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="registry-server" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.159203 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="registry-server" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.159476 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="b552217d-6c3c-4484-bf59-ec22adaa1077" containerName="registry-server" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.160184 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.180184 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29567041-xdqml"] Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.206426 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mswpc\" (UniqueName: \"kubernetes.io/projected/176300c2-c75c-415a-8a97-8fc1227581cb-kube-api-access-mswpc\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.206653 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-config-data\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.206750 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-combined-ca-bundle\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.206849 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-fernet-keys\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.309027 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mswpc\" (UniqueName: \"kubernetes.io/projected/176300c2-c75c-415a-8a97-8fc1227581cb-kube-api-access-mswpc\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.309157 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-config-data\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.309208 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-combined-ca-bundle\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.309255 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-fernet-keys\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.314977 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-combined-ca-bundle\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.318471 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-config-data\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.319523 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-fernet-keys\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.326367 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-mswpc\" (UniqueName: \"kubernetes.io/projected/176300c2-c75c-415a-8a97-8fc1227581cb-kube-api-access-mswpc\") pod \"keystone-cron-29567041-xdqml\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.480802 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:00 crc kubenswrapper[3552]: I0320 16:01:00.975579 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29567041-xdqml"] Mar 20 16:01:01 crc kubenswrapper[3552]: I0320 16:01:01.092257 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29567041-xdqml" event={"ID":"176300c2-c75c-415a-8a97-8fc1227581cb","Type":"ContainerStarted","Data":"26e1afa9d151f095ac63bee474c2e474f1bd82b0db58a9fc4cbe0e837b982f89"} Mar 20 16:01:01 crc kubenswrapper[3552]: I0320 16:01:01.339381 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:01:01 crc kubenswrapper[3552]: I0320 16:01:01.340842 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:01:01 crc kubenswrapper[3552]: I0320 16:01:01.341014 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:01:01 crc kubenswrapper[3552]: I0320 16:01:01.341196 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:01:01 crc kubenswrapper[3552]: I0320 16:01:01.341343 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:01:02 crc kubenswrapper[3552]: I0320 16:01:02.107821 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29567041-xdqml" event={"ID":"176300c2-c75c-415a-8a97-8fc1227581cb","Type":"ContainerStarted","Data":"f8f815ce082cc9cb52d6b004006aa78f97b76348e09c064afcb6b3199ed62207"} Mar 20 16:01:02 crc kubenswrapper[3552]: I0320 16:01:02.139115 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/keystone-cron-29567041-xdqml" podStartSLOduration=2.139049845 podStartE2EDuration="2.139049845s" 
podCreationTimestamp="2026-03-20 16:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 16:01:02.127094938 +0000 UTC m=+2161.820791808" watchObservedRunningTime="2026-03-20 16:01:02.139049845 +0000 UTC m=+2161.832746675" Mar 20 16:01:04 crc kubenswrapper[3552]: I0320 16:01:04.125176 3552 generic.go:334] "Generic (PLEG): container finished" podID="176300c2-c75c-415a-8a97-8fc1227581cb" containerID="f8f815ce082cc9cb52d6b004006aa78f97b76348e09c064afcb6b3199ed62207" exitCode=0 Mar 20 16:01:04 crc kubenswrapper[3552]: I0320 16:01:04.125266 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29567041-xdqml" event={"ID":"176300c2-c75c-415a-8a97-8fc1227581cb","Type":"ContainerDied","Data":"f8f815ce082cc9cb52d6b004006aa78f97b76348e09c064afcb6b3199ed62207"} Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.468053 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.625310 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mswpc\" (UniqueName: \"kubernetes.io/projected/176300c2-c75c-415a-8a97-8fc1227581cb-kube-api-access-mswpc\") pod \"176300c2-c75c-415a-8a97-8fc1227581cb\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.625562 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-config-data\") pod \"176300c2-c75c-415a-8a97-8fc1227581cb\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.625618 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-fernet-keys\") pod \"176300c2-c75c-415a-8a97-8fc1227581cb\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.625673 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-combined-ca-bundle\") pod \"176300c2-c75c-415a-8a97-8fc1227581cb\" (UID: \"176300c2-c75c-415a-8a97-8fc1227581cb\") " Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.631703 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/176300c2-c75c-415a-8a97-8fc1227581cb-kube-api-access-mswpc" (OuterVolumeSpecName: "kube-api-access-mswpc") pod "176300c2-c75c-415a-8a97-8fc1227581cb" (UID: "176300c2-c75c-415a-8a97-8fc1227581cb"). InnerVolumeSpecName "kube-api-access-mswpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.631851 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "176300c2-c75c-415a-8a97-8fc1227581cb" (UID: "176300c2-c75c-415a-8a97-8fc1227581cb"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.663729 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "176300c2-c75c-415a-8a97-8fc1227581cb" (UID: "176300c2-c75c-415a-8a97-8fc1227581cb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.677986 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-config-data" (OuterVolumeSpecName: "config-data") pod "176300c2-c75c-415a-8a97-8fc1227581cb" (UID: "176300c2-c75c-415a-8a97-8fc1227581cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.728688 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.728727 3552 reconciler_common.go:300] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-fernet-keys\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.728743 3552 reconciler_common.go:300] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/176300c2-c75c-415a-8a97-8fc1227581cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:05 crc kubenswrapper[3552]: I0320 16:01:05.728758 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mswpc\" (UniqueName: \"kubernetes.io/projected/176300c2-c75c-415a-8a97-8fc1227581cb-kube-api-access-mswpc\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:06 crc kubenswrapper[3552]: I0320 16:01:06.143390 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29567041-xdqml" event={"ID":"176300c2-c75c-415a-8a97-8fc1227581cb","Type":"ContainerDied","Data":"26e1afa9d151f095ac63bee474c2e474f1bd82b0db58a9fc4cbe0e837b982f89"} Mar 20 16:01:06 crc kubenswrapper[3552]: I0320 16:01:06.143473 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26e1afa9d151f095ac63bee474c2e474f1bd82b0db58a9fc4cbe0e837b982f89" Mar 20 16:01:06 crc kubenswrapper[3552]: I0320 16:01:06.143480 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29567041-xdqml" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.778660 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.779191 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.825344 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c2dxq"] Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.825824 3552 topology_manager.go:215] "Topology Admit Handler" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" podNamespace="openshift-marketplace" podName="redhat-operators-c2dxq" Mar 20 16:01:12 crc kubenswrapper[3552]: E0320 16:01:12.826257 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="176300c2-c75c-415a-8a97-8fc1227581cb" containerName="keystone-cron" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.826278 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="176300c2-c75c-415a-8a97-8fc1227581cb" containerName="keystone-cron" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.826660 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="176300c2-c75c-415a-8a97-8fc1227581cb" containerName="keystone-cron" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.832451 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.839082 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c2dxq"] Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.896221 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-utilities\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.896378 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-catalog-content\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:12 crc kubenswrapper[3552]: I0320 16:01:12.897193 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkbpr\" (UniqueName: \"kubernetes.io/projected/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-kube-api-access-wkbpr\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:12.999602 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wkbpr\" (UniqueName: \"kubernetes.io/projected/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-kube-api-access-wkbpr\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:12.999693 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-utilities\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:12.999733 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-catalog-content\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:13.000283 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-catalog-content\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:13.000453 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-utilities\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:13.029865 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wkbpr\" (UniqueName: \"kubernetes.io/projected/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-kube-api-access-wkbpr\") pod \"redhat-operators-c2dxq\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") " pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:13.153724 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:01:13 crc kubenswrapper[3552]: I0320 16:01:13.663645 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c2dxq"] Mar 20 16:01:14 crc kubenswrapper[3552]: I0320 16:01:14.220116 3552 generic.go:334] "Generic (PLEG): container finished" podID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerID="9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a" exitCode=0 Mar 20 16:01:14 crc kubenswrapper[3552]: I0320 16:01:14.220207 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerDied","Data":"9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a"} Mar 20 16:01:14 crc kubenswrapper[3552]: I0320 16:01:14.220563 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerStarted","Data":"854b20cb886785ca0e72cf36834451b90053039572018a0a3a3fb726addad370"} Mar 20 16:01:14 crc kubenswrapper[3552]: I0320 16:01:14.224945 3552 generic.go:334] "Generic (PLEG): container finished" podID="ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" containerID="321e75aa4eece819cc7334be1d8f0f45cbc342f7050491502a8e219f51327ea0" exitCode=0 Mar 20 16:01:14 crc kubenswrapper[3552]: I0320 16:01:14.224982 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" event={"ID":"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d","Type":"ContainerDied","Data":"321e75aa4eece819cc7334be1d8f0f45cbc342f7050491502a8e219f51327ea0"} Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.236163 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerStarted","Data":"5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40"} Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.649174 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.669218 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-inventory\") pod \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.669496 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-ssh-key-openstack-edpm-ipam\") pod \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.669556 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4868\" (UniqueName: \"kubernetes.io/projected/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-kube-api-access-c4868\") pod \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\" (UID: \"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d\") " Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.696652 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-kube-api-access-c4868" (OuterVolumeSpecName: "kube-api-access-c4868") pod "ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" (UID: "ae6c8b60-dfd9-432a-9c27-4768c6cbe57d"). InnerVolumeSpecName "kube-api-access-c4868". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.708694 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" (UID: "ae6c8b60-dfd9-432a-9c27-4768c6cbe57d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.720670 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-inventory" (OuterVolumeSpecName: "inventory") pod "ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" (UID: "ae6c8b60-dfd9-432a-9c27-4768c6cbe57d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.771684 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.771726 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:15 crc kubenswrapper[3552]: I0320 16:01:15.771737 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-c4868\" (UniqueName: \"kubernetes.io/projected/ae6c8b60-dfd9-432a-9c27-4768c6cbe57d-kube-api-access-c4868\") on node \"crc\" DevicePath \"\"" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.243552 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" event={"ID":"ae6c8b60-dfd9-432a-9c27-4768c6cbe57d","Type":"ContainerDied","Data":"5d905fa8d2a7a7e8fddda37e034287f99b0df2d9dd3f46f43e7f5f38973d9302"} Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.243592 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d905fa8d2a7a7e8fddda37e034287f99b0df2d9dd3f46f43e7f5f38973d9302" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.243655 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-77jvc" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.347026 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb"] Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.347269 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4da7fc22-4c28-40c6-93f3-6b7e09f855be" podNamespace="openstack" podName="configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: E0320 16:01:16.347707 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.347729 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.348286 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae6c8b60-dfd9-432a-9c27-4768c6cbe57d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.349136 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.351057 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.351558 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.353103 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.354120 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.363023 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb"] Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.383372 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.383479 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.383536 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zw2v\" (UniqueName: \"kubernetes.io/projected/4da7fc22-4c28-40c6-93f3-6b7e09f855be-kube-api-access-2zw2v\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.485321 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.486509 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.486940 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2zw2v\" (UniqueName: 
\"kubernetes.io/projected/4da7fc22-4c28-40c6-93f3-6b7e09f855be-kube-api-access-2zw2v\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.490011 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.490022 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.506273 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zw2v\" (UniqueName: \"kubernetes.io/projected/4da7fc22-4c28-40c6-93f3-6b7e09f855be-kube-api-access-2zw2v\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:16 crc kubenswrapper[3552]: I0320 16:01:16.665630 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:01:17 crc kubenswrapper[3552]: I0320 16:01:17.052884 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-w7mf5"] Mar 20 16:01:17 crc kubenswrapper[3552]: I0320 16:01:17.063607 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-w7mf5"] Mar 20 16:01:17 crc kubenswrapper[3552]: I0320 16:01:17.222099 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb"] Mar 20 16:01:17 crc kubenswrapper[3552]: W0320 16:01:17.231075 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4da7fc22_4c28_40c6_93f3_6b7e09f855be.slice/crio-9a2c4fc80f676ea4619d51e7cd555fd70f0cb12d80e4cbf835248f7dc017d7bd WatchSource:0}: Error finding container 9a2c4fc80f676ea4619d51e7cd555fd70f0cb12d80e4cbf835248f7dc017d7bd: Status 404 returned error can't find the container with id 9a2c4fc80f676ea4619d51e7cd555fd70f0cb12d80e4cbf835248f7dc017d7bd Mar 20 16:01:17 crc kubenswrapper[3552]: I0320 16:01:17.276212 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" event={"ID":"4da7fc22-4c28-40c6-93f3-6b7e09f855be","Type":"ContainerStarted","Data":"9a2c4fc80f676ea4619d51e7cd555fd70f0cb12d80e4cbf835248f7dc017d7bd"} Mar 20 16:01:17 crc kubenswrapper[3552]: I0320 16:01:17.445916 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="773e318d-c001-447d-99e0-c351b3782d0d" path="/var/lib/kubelet/pods/773e318d-c001-447d-99e0-c351b3782d0d/volumes" Mar 20 16:01:19 crc kubenswrapper[3552]: I0320 16:01:19.295202 3552 kubelet.go:2461] "SyncLoop 
(PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" event={"ID":"4da7fc22-4c28-40c6-93f3-6b7e09f855be","Type":"ContainerStarted","Data":"5b1b815044e9663434bffeb1bfef87e5830157bd2833017fda1b80694cf4f557"} Mar 20 16:01:19 crc kubenswrapper[3552]: I0320 16:01:19.313690 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" podStartSLOduration=2.601425203 podStartE2EDuration="3.313633814s" podCreationTimestamp="2026-03-20 16:01:16 +0000 UTC" firstStartedPulling="2026-03-20 16:01:17.233543543 +0000 UTC m=+2176.927240373" lastFinishedPulling="2026-03-20 16:01:17.945752144 +0000 UTC m=+2177.639448984" observedRunningTime="2026-03-20 16:01:19.308739754 +0000 UTC m=+2179.002436604" watchObservedRunningTime="2026-03-20 16:01:19.313633814 +0000 UTC m=+2179.007330644" Mar 20 16:01:25 crc kubenswrapper[3552]: I0320 16:01:25.423507 3552 scope.go:117] "RemoveContainer" containerID="d5ac1d7793cb31283017aa5481d427a38fb2bfb427499424d5226c984cd5cec7" Mar 20 16:01:25 crc kubenswrapper[3552]: I0320 16:01:25.532756 3552 scope.go:117] "RemoveContainer" containerID="7fafa5b51087234d8d0a61eb64e29053ab8afcc0fb946e770ce31f800b8323ca" Mar 20 16:01:25 crc kubenswrapper[3552]: I0320 16:01:25.602028 3552 scope.go:117] "RemoveContainer" containerID="23e82a487853058a5b68c6d0737232c3cb0ff11505f6c0e3255ae6020f901a15" Mar 20 16:01:25 crc kubenswrapper[3552]: I0320 16:01:25.673427 3552 scope.go:117] "RemoveContainer" containerID="2e70252a8d7ea4b7a2db3ebb595c9247b1c9f888c76a425f7db4ddbc6a98f888" Mar 20 16:01:25 crc kubenswrapper[3552]: I0320 16:01:25.783932 3552 scope.go:117] "RemoveContainer" containerID="80e049e27f64b60997e81ae27313558fd5a297164ae94fde489aed360e16ddca" Mar 20 16:01:25 crc kubenswrapper[3552]: I0320 16:01:25.981830 3552 scope.go:117] "RemoveContainer" containerID="1c2c7fe7711cd14faa9b682e958d7cdefdedad08f1a5328f09d3fc4f6c168c75" Mar 20 16:01:26 crc kubenswrapper[3552]: I0320 16:01:26.016213 3552 scope.go:117] "RemoveContainer" containerID="5fd3c9312436411d0fb3bc40ac6b3a98ea6249c887ec93793c8f307614f03379" Mar 20 16:01:26 crc kubenswrapper[3552]: I0320 16:01:26.095665 3552 scope.go:117] "RemoveContainer" containerID="4a530425b07693de2e4e0194ace026aefd776a1eb85e6c3679f86da236480140" Mar 20 16:01:27 crc kubenswrapper[3552]: I0320 16:01:27.059840 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-kk9lv"] Mar 20 16:01:27 crc kubenswrapper[3552]: I0320 16:01:27.073122 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-kk9lv"] Mar 20 16:01:27 crc kubenswrapper[3552]: I0320 16:01:27.440869 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8cee03b-a095-46b4-aa33-ebb7498fdc43" path="/var/lib/kubelet/pods/e8cee03b-a095-46b4-aa33-ebb7498fdc43/volumes" Mar 20 16:01:42 crc kubenswrapper[3552]: I0320 16:01:42.778692 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:01:42 crc kubenswrapper[3552]: I0320 16:01:42.779265 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:01:42 crc kubenswrapper[3552]: I0320 16:01:42.779297 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 16:01:42 crc kubenswrapper[3552]: I0320 16:01:42.780043 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f0b09ae54bea2ff083e11bf57e9ad0b3e8c74d921c18b9a3d158e51bbcd5f477"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 16:01:42 crc kubenswrapper[3552]: I0320 16:01:42.780205 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://f0b09ae54bea2ff083e11bf57e9ad0b3e8c74d921c18b9a3d158e51bbcd5f477" gracePeriod=600 Mar 20 16:01:43 crc kubenswrapper[3552]: I0320 16:01:43.528133 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="f0b09ae54bea2ff083e11bf57e9ad0b3e8c74d921c18b9a3d158e51bbcd5f477" exitCode=0 Mar 20 16:01:43 crc kubenswrapper[3552]: I0320 16:01:43.528206 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"f0b09ae54bea2ff083e11bf57e9ad0b3e8c74d921c18b9a3d158e51bbcd5f477"} Mar 20 16:01:43 crc kubenswrapper[3552]: I0320 16:01:43.528742 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617"} Mar 20 16:01:43 crc kubenswrapper[3552]: I0320 16:01:43.528760 3552 scope.go:117] "RemoveContainer" containerID="f7ec4088524f32bdc92c90c68b69911accdbe422a7c208c2046560df7e208b2b" Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.827552 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dtqsf"] Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.828222 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" podNamespace="openshift-marketplace" podName="redhat-marketplace-dtqsf" Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.829966 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.839115 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtqsf"] Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.989704 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-catalog-content\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.989902 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-utilities\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:56 crc kubenswrapper[3552]: I0320 16:01:56.990006 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gq59\" (UniqueName: \"kubernetes.io/projected/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-kube-api-access-2gq59\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.091147 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-utilities\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.091237 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-2gq59\" (UniqueName: \"kubernetes.io/projected/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-kube-api-access-2gq59\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.091278 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-catalog-content\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.188254 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-catalog-content\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.188265 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-utilities\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.253528 3552 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2gq59\" (UniqueName: \"kubernetes.io/projected/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-kube-api-access-2gq59\") pod \"redhat-marketplace-dtqsf\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:01:57 crc kubenswrapper[3552]: I0320 16:01:57.515230 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:00 crc kubenswrapper[3552]: I0320 16:02:00.441369 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtqsf"] Mar 20 16:02:00 crc kubenswrapper[3552]: W0320 16:02:00.530177 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fba62f6_9636_405e_bbaa_7b5784ad0a1c.slice/crio-bcde8f50d8fda6882152f30253e825d46981334b5bfaf9f39bce04557b6859ff WatchSource:0}: Error finding container bcde8f50d8fda6882152f30253e825d46981334b5bfaf9f39bce04557b6859ff: Status 404 returned error can't find the container with id bcde8f50d8fda6882152f30253e825d46981334b5bfaf9f39bce04557b6859ff Mar 20 16:02:00 crc kubenswrapper[3552]: I0320 16:02:00.662427 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerStarted","Data":"bcde8f50d8fda6882152f30253e825d46981334b5bfaf9f39bce04557b6859ff"} Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.342045 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.342377 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.342422 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.342445 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.342465 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.673108 3552 generic.go:334] "Generic (PLEG): container finished" podID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerID="0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e" exitCode=0 Mar 20 16:02:01 crc kubenswrapper[3552]: I0320 16:02:01.674226 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerDied","Data":"0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e"} Mar 20 16:02:02 crc kubenswrapper[3552]: I0320 16:02:02.096747 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-6zdxq"] Mar 20 16:02:02 crc kubenswrapper[3552]: I0320 16:02:02.108924 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-6zdxq"] Mar 20 16:02:02 crc kubenswrapper[3552]: I0320 16:02:02.683003 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" 
event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerStarted","Data":"4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397"} Mar 20 16:02:03 crc kubenswrapper[3552]: I0320 16:02:03.471252 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88c7817a-b4f0-4685-a348-8cf8a662632b" path="/var/lib/kubelet/pods/88c7817a-b4f0-4685-a348-8cf8a662632b/volumes" Mar 20 16:02:10 crc kubenswrapper[3552]: I0320 16:02:10.753192 3552 generic.go:334] "Generic (PLEG): container finished" podID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerID="5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40" exitCode=0 Mar 20 16:02:10 crc kubenswrapper[3552]: I0320 16:02:10.753217 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerDied","Data":"5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40"} Mar 20 16:02:11 crc kubenswrapper[3552]: I0320 16:02:11.762648 3552 generic.go:334] "Generic (PLEG): container finished" podID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerID="4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397" exitCode=0 Mar 20 16:02:11 crc kubenswrapper[3552]: I0320 16:02:11.762934 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerDied","Data":"4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397"} Mar 20 16:02:12 crc kubenswrapper[3552]: I0320 16:02:12.776536 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerStarted","Data":"8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1"} Mar 20 16:02:12 crc kubenswrapper[3552]: I0320 16:02:12.803855 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c2dxq" podStartSLOduration=3.908824757 podStartE2EDuration="1m0.803800441s" podCreationTimestamp="2026-03-20 16:01:12 +0000 UTC" firstStartedPulling="2026-03-20 16:01:14.225293546 +0000 UTC m=+2173.918990386" lastFinishedPulling="2026-03-20 16:02:11.12026924 +0000 UTC m=+2230.813966070" observedRunningTime="2026-03-20 16:02:12.79997898 +0000 UTC m=+2232.493675840" watchObservedRunningTime="2026-03-20 16:02:12.803800441 +0000 UTC m=+2232.497497271" Mar 20 16:02:13 crc kubenswrapper[3552]: I0320 16:02:13.154710 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:02:13 crc kubenswrapper[3552]: I0320 16:02:13.155055 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c2dxq" Mar 20 16:02:13 crc kubenswrapper[3552]: I0320 16:02:13.785691 3552 generic.go:334] "Generic (PLEG): container finished" podID="4da7fc22-4c28-40c6-93f3-6b7e09f855be" containerID="5b1b815044e9663434bffeb1bfef87e5830157bd2833017fda1b80694cf4f557" exitCode=0 Mar 20 16:02:13 crc kubenswrapper[3552]: I0320 16:02:13.785758 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" event={"ID":"4da7fc22-4c28-40c6-93f3-6b7e09f855be","Type":"ContainerDied","Data":"5b1b815044e9663434bffeb1bfef87e5830157bd2833017fda1b80694cf4f557"} Mar 20 16:02:13 crc kubenswrapper[3552]: I0320 16:02:13.787799 3552 
kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerStarted","Data":"92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896"} Mar 20 16:02:13 crc kubenswrapper[3552]: I0320 16:02:13.822481 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dtqsf" podStartSLOduration=7.391692967 podStartE2EDuration="17.822426789s" podCreationTimestamp="2026-03-20 16:01:56 +0000 UTC" firstStartedPulling="2026-03-20 16:02:01.675338223 +0000 UTC m=+2221.369035053" lastFinishedPulling="2026-03-20 16:02:12.106072045 +0000 UTC m=+2231.799768875" observedRunningTime="2026-03-20 16:02:13.819112761 +0000 UTC m=+2233.512809601" watchObservedRunningTime="2026-03-20 16:02:13.822426789 +0000 UTC m=+2233.516123629" Mar 20 16:02:14 crc kubenswrapper[3552]: I0320 16:02:14.242477 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2dxq" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server" probeResult="failure" output=< Mar 20 16:02:14 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:02:14 crc kubenswrapper[3552]: > Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.655385 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.760923 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-ssh-key-openstack-edpm-ipam\") pod \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.760974 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zw2v\" (UniqueName: \"kubernetes.io/projected/4da7fc22-4c28-40c6-93f3-6b7e09f855be-kube-api-access-2zw2v\") pod \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.761086 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-inventory\") pod \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\" (UID: \"4da7fc22-4c28-40c6-93f3-6b7e09f855be\") " Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.784653 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4da7fc22-4c28-40c6-93f3-6b7e09f855be-kube-api-access-2zw2v" (OuterVolumeSpecName: "kube-api-access-2zw2v") pod "4da7fc22-4c28-40c6-93f3-6b7e09f855be" (UID: "4da7fc22-4c28-40c6-93f3-6b7e09f855be"). InnerVolumeSpecName "kube-api-access-2zw2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.804746 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4da7fc22-4c28-40c6-93f3-6b7e09f855be" (UID: "4da7fc22-4c28-40c6-93f3-6b7e09f855be"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.805204 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" event={"ID":"4da7fc22-4c28-40c6-93f3-6b7e09f855be","Type":"ContainerDied","Data":"9a2c4fc80f676ea4619d51e7cd555fd70f0cb12d80e4cbf835248f7dc017d7bd"} Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.805241 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a2c4fc80f676ea4619d51e7cd555fd70f0cb12d80e4cbf835248f7dc017d7bd" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.805300 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.811125 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-inventory" (OuterVolumeSpecName: "inventory") pod "4da7fc22-4c28-40c6-93f3-6b7e09f855be" (UID: "4da7fc22-4c28-40c6-93f3-6b7e09f855be"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.863437 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.863702 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2zw2v\" (UniqueName: \"kubernetes.io/projected/4da7fc22-4c28-40c6-93f3-6b7e09f855be-kube-api-access-2zw2v\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.863779 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4da7fc22-4c28-40c6-93f3-6b7e09f855be-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.917759 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-dmpxq"] Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.917923 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c0a9da87-25f8-42b8-94cd-9c25f87989d3" podNamespace="openstack" podName="ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:15 crc kubenswrapper[3552]: E0320 16:02:15.918147 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4da7fc22-4c28-40c6-93f3-6b7e09f855be" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.918158 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4da7fc22-4c28-40c6-93f3-6b7e09f855be" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.918378 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4da7fc22-4c28-40c6-93f3-6b7e09f855be" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.919003 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.939187 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-dmpxq"] Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.965225 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.965299 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj2s7\" (UniqueName: \"kubernetes.io/projected/c0a9da87-25f8-42b8-94cd-9c25f87989d3-kube-api-access-nj2s7\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:15 crc kubenswrapper[3552]: I0320 16:02:15.965335 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.066934 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.066991 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-nj2s7\" (UniqueName: \"kubernetes.io/projected/c0a9da87-25f8-42b8-94cd-9c25f87989d3-kube-api-access-nj2s7\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.067015 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.071244 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.072102 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: 
\"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.086270 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj2s7\" (UniqueName: \"kubernetes.io/projected/c0a9da87-25f8-42b8-94cd-9c25f87989d3-kube-api-access-nj2s7\") pod \"ssh-known-hosts-edpm-deployment-dmpxq\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") " pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.235123 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:16 crc kubenswrapper[3552]: I0320 16:02:16.868104 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-dmpxq"] Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.515970 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.516358 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.617046 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.821974 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" event={"ID":"c0a9da87-25f8-42b8-94cd-9c25f87989d3","Type":"ContainerStarted","Data":"66274f8a77e33cd3e7d513e99ffd66d107cf285ad1c02c54594f1902e2e68a3a"} Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.822018 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" event={"ID":"c0a9da87-25f8-42b8-94cd-9c25f87989d3","Type":"ContainerStarted","Data":"ca001c7369b049efe3d639392c0619aa901e375e41e7025e1bc81c576cf7ee39"} Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.917006 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:17 crc kubenswrapper[3552]: I0320 16:02:17.956736 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtqsf"] Mar 20 16:02:18 crc kubenswrapper[3552]: I0320 16:02:18.860584 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" podStartSLOduration=3.460989975 podStartE2EDuration="3.860500023s" podCreationTimestamp="2026-03-20 16:02:15 +0000 UTC" firstStartedPulling="2026-03-20 16:02:16.864230216 +0000 UTC m=+2236.557927046" lastFinishedPulling="2026-03-20 16:02:17.263740264 +0000 UTC m=+2236.957437094" observedRunningTime="2026-03-20 16:02:18.851066842 +0000 UTC m=+2238.544763672" watchObservedRunningTime="2026-03-20 16:02:18.860500023 +0000 UTC m=+2238.554196853" Mar 20 16:02:19 crc kubenswrapper[3552]: I0320 16:02:19.842118 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dtqsf" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="registry-server" containerID="cri-o://92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896" gracePeriod=2 Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.284955 3552 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.448423 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gq59\" (UniqueName: \"kubernetes.io/projected/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-kube-api-access-2gq59\") pod \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.448549 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-catalog-content\") pod \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.448605 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-utilities\") pod \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\" (UID: \"6fba62f6-9636-405e-bbaa-7b5784ad0a1c\") " Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.449274 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-utilities" (OuterVolumeSpecName: "utilities") pod "6fba62f6-9636-405e-bbaa-7b5784ad0a1c" (UID: "6fba62f6-9636-405e-bbaa-7b5784ad0a1c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.454563 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-kube-api-access-2gq59" (OuterVolumeSpecName: "kube-api-access-2gq59") pod "6fba62f6-9636-405e-bbaa-7b5784ad0a1c" (UID: "6fba62f6-9636-405e-bbaa-7b5784ad0a1c"). InnerVolumeSpecName "kube-api-access-2gq59". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.551303 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-2gq59\" (UniqueName: \"kubernetes.io/projected/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-kube-api-access-2gq59\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.551343 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.590743 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fba62f6-9636-405e-bbaa-7b5784ad0a1c" (UID: "6fba62f6-9636-405e-bbaa-7b5784ad0a1c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.653487 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fba62f6-9636-405e-bbaa-7b5784ad0a1c-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.853222 3552 generic.go:334] "Generic (PLEG): container finished" podID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerID="92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896" exitCode=0 Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.853269 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerDied","Data":"92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896"} Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.853679 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtqsf" event={"ID":"6fba62f6-9636-405e-bbaa-7b5784ad0a1c","Type":"ContainerDied","Data":"bcde8f50d8fda6882152f30253e825d46981334b5bfaf9f39bce04557b6859ff"} Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.853317 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtqsf" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.853703 3552 scope.go:117] "RemoveContainer" containerID="92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896" Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.927488 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtqsf"] Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.942014 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtqsf"] Mar 20 16:02:20 crc kubenswrapper[3552]: I0320 16:02:20.966742 3552 scope.go:117] "RemoveContainer" containerID="4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397" Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.038764 3552 scope.go:117] "RemoveContainer" containerID="0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e" Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.094033 3552 scope.go:117] "RemoveContainer" containerID="92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896" Mar 20 16:02:21 crc kubenswrapper[3552]: E0320 16:02:21.095818 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896\": container with ID starting with 92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896 not found: ID does not exist" containerID="92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896" Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.095874 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896"} err="failed to get container status \"92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896\": rpc error: code = NotFound desc = could not find container \"92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896\": container with ID starting with 92b1bcf6d72e9c7b69ef93845d8c9c1364fe5797b61e0d013b83259598904896 not found: ID does not exist" Mar 
Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.095890 3552 scope.go:117] "RemoveContainer" containerID="4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397"
Mar 20 16:02:21 crc kubenswrapper[3552]: E0320 16:02:21.096366 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397\": container with ID starting with 4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397 not found: ID does not exist" containerID="4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397"
Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.096435 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397"} err="failed to get container status \"4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397\": rpc error: code = NotFound desc = could not find container \"4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397\": container with ID starting with 4c433b056bfbef46f4955a9f96642034f0d1b10695e1c96dcb2caae84fc4c397 not found: ID does not exist"
Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.096457 3552 scope.go:117] "RemoveContainer" containerID="0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e"
Mar 20 16:02:21 crc kubenswrapper[3552]: E0320 16:02:21.096915 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e\": container with ID starting with 0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e not found: ID does not exist" containerID="0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e"
Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.096941 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e"} err="failed to get container status \"0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e\": rpc error: code = NotFound desc = could not find container \"0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e\": container with ID starting with 0c10fa090a67f0972dd4d93a5882f4daf57759e2aed43298feab3b3ac5aba10e not found: ID does not exist"
Mar 20 16:02:21 crc kubenswrapper[3552]: I0320 16:02:21.440016 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" path="/var/lib/kubelet/pods/6fba62f6-9636-405e-bbaa-7b5784ad0a1c/volumes"
Mar 20 16:02:24 crc kubenswrapper[3552]: I0320 16:02:24.272763 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2dxq" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server" probeResult="failure" output=<
Mar 20 16:02:24 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s
Mar 20 16:02:24 crc kubenswrapper[3552]: >
Mar 20 16:02:25 crc kubenswrapper[3552]: I0320 16:02:25.891532 3552 generic.go:334] "Generic (PLEG): container finished" podID="c0a9da87-25f8-42b8-94cd-9c25f87989d3" containerID="66274f8a77e33cd3e7d513e99ffd66d107cf285ad1c02c54594f1902e2e68a3a" exitCode=0
Mar 20 16:02:25 crc kubenswrapper[3552]: I0320 16:02:25.891639 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" event={"ID":"c0a9da87-25f8-42b8-94cd-9c25f87989d3","Type":"ContainerDied","Data":"66274f8a77e33cd3e7d513e99ffd66d107cf285ad1c02c54594f1902e2e68a3a"}
Mar 20 16:02:26 crc kubenswrapper[3552]: I0320 16:02:26.389441 3552 scope.go:117] "RemoveContainer" containerID="351b090c26880b314b89750cf13da50d0b8ae5e108482d11f8545dfbc54860c7"
Mar 20 16:02:26 crc kubenswrapper[3552]: I0320 16:02:26.522751 3552 scope.go:117] "RemoveContainer" containerID="7bbc5d3815c901d18f171790d0ec33f438443169ed6818e6c189c2dd6da77fe2"
Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.367153 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq"
Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.492318 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-inventory-0\") pod \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") "
Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.492376 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-ssh-key-openstack-edpm-ipam\") pod \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") "
Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.492503 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj2s7\" (UniqueName: \"kubernetes.io/projected/c0a9da87-25f8-42b8-94cd-9c25f87989d3-kube-api-access-nj2s7\") pod \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\" (UID: \"c0a9da87-25f8-42b8-94cd-9c25f87989d3\") "
Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.500687 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0a9da87-25f8-42b8-94cd-9c25f87989d3-kube-api-access-nj2s7" (OuterVolumeSpecName: "kube-api-access-nj2s7") pod "c0a9da87-25f8-42b8-94cd-9c25f87989d3" (UID: "c0a9da87-25f8-42b8-94cd-9c25f87989d3"). InnerVolumeSpecName "kube-api-access-nj2s7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.523228 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "c0a9da87-25f8-42b8-94cd-9c25f87989d3" (UID: "c0a9da87-25f8-42b8-94cd-9c25f87989d3"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.595068 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nj2s7\" (UniqueName: \"kubernetes.io/projected/c0a9da87-25f8-42b8-94cd-9c25f87989d3-kube-api-access-nj2s7\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.595120 3552 reconciler_common.go:300] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-inventory-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.595131 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c0a9da87-25f8-42b8-94cd-9c25f87989d3-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.908101 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" event={"ID":"c0a9da87-25f8-42b8-94cd-9c25f87989d3","Type":"ContainerDied","Data":"ca001c7369b049efe3d639392c0619aa901e375e41e7025e1bc81c576cf7ee39"} Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.908393 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca001c7369b049efe3d639392c0619aa901e375e41e7025e1bc81c576cf7ee39" Mar 20 16:02:27 crc kubenswrapper[3552]: I0320 16:02:27.908207 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-dmpxq" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.020894 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l"] Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021081 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b2f7ac87-e7ee-475a-8aea-91d2123e861d" podNamespace="openstack" podName="run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: E0320 16:02:28.021365 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="extract-utilities" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021381 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="extract-utilities" Mar 20 16:02:28 crc kubenswrapper[3552]: E0320 16:02:28.021395 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="registry-server" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021416 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="registry-server" Mar 20 16:02:28 crc kubenswrapper[3552]: E0320 16:02:28.021444 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c0a9da87-25f8-42b8-94cd-9c25f87989d3" containerName="ssh-known-hosts-edpm-deployment" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021451 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a9da87-25f8-42b8-94cd-9c25f87989d3" containerName="ssh-known-hosts-edpm-deployment" Mar 20 16:02:28 crc kubenswrapper[3552]: E0320 16:02:28.021469 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="extract-content" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021475 3552 
state_mem.go:107] "Deleted CPUSet assignment" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="extract-content" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021683 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0a9da87-25f8-42b8-94cd-9c25f87989d3" containerName="ssh-known-hosts-edpm-deployment" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.021710 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fba62f6-9636-405e-bbaa-7b5784ad0a1c" containerName="registry-server" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.022349 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.024962 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.026630 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.027004 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.027780 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.031276 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l"] Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.205181 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.205316 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw97p\" (UniqueName: \"kubernetes.io/projected/b2f7ac87-e7ee-475a-8aea-91d2123e861d-kube-api-access-rw97p\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.205356 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.308084 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rw97p\" (UniqueName: \"kubernetes.io/projected/b2f7ac87-e7ee-475a-8aea-91d2123e861d-kube-api-access-rw97p\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.308143 3552 
reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.308270 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.318230 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.318241 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.329955 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw97p\" (UniqueName: \"kubernetes.io/projected/b2f7ac87-e7ee-475a-8aea-91d2123e861d-kube-api-access-rw97p\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dt75l\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.350352 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" Mar 20 16:02:28 crc kubenswrapper[3552]: I0320 16:02:28.930268 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l"] Mar 20 16:02:29 crc kubenswrapper[3552]: I0320 16:02:29.927735 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" event={"ID":"b2f7ac87-e7ee-475a-8aea-91d2123e861d","Type":"ContainerStarted","Data":"21b36f53814bf559bfa6dd6f36381013ab5b09b787cec3fa89840851f5fa683c"} Mar 20 16:02:29 crc kubenswrapper[3552]: I0320 16:02:29.928334 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" event={"ID":"b2f7ac87-e7ee-475a-8aea-91d2123e861d","Type":"ContainerStarted","Data":"55179e08d78c3a432b26ede4318d2512363a51de5a5fbb024f16919b148b8988"} Mar 20 16:02:29 crc kubenswrapper[3552]: I0320 16:02:29.953705 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" podStartSLOduration=2.555320225 podStartE2EDuration="2.953660593s" podCreationTimestamp="2026-03-20 16:02:27 +0000 UTC" firstStartedPulling="2026-03-20 16:02:28.944273452 +0000 UTC m=+2248.637970292" lastFinishedPulling="2026-03-20 16:02:29.34261383 +0000 UTC m=+2249.036310660" observedRunningTime="2026-03-20 16:02:29.947377807 +0000 UTC m=+2249.641074657" watchObservedRunningTime="2026-03-20 16:02:29.953660593 +0000 UTC m=+2249.647357423" Mar 20 16:02:34 crc kubenswrapper[3552]: I0320 16:02:34.242772 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2dxq" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server" probeResult="failure" output=< Mar 20 16:02:34 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:02:34 crc kubenswrapper[3552]: > Mar 20 16:02:38 crc kubenswrapper[3552]: I0320 16:02:38.995623 3552 generic.go:334] "Generic (PLEG): container finished" podID="b2f7ac87-e7ee-475a-8aea-91d2123e861d" containerID="21b36f53814bf559bfa6dd6f36381013ab5b09b787cec3fa89840851f5fa683c" exitCode=0 Mar 20 16:02:38 crc kubenswrapper[3552]: I0320 16:02:38.995842 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" event={"ID":"b2f7ac87-e7ee-475a-8aea-91d2123e861d","Type":"ContainerDied","Data":"21b36f53814bf559bfa6dd6f36381013ab5b09b787cec3fa89840851f5fa683c"} Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.545311 3552 util.go:48] "No ready sandbox for pod can be found. 
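The pod_startup_latency_tracker entries above relate as: podStartSLOduration = podStartE2EDuration minus the image-pull window, computed on the monotonic clock (the m=+... offsets). A short Go check of the run-os-edpm-deployment-openstack-edpm-ipam-dt75l numbers, copied from the entry at 16:02:29:

package main

import "fmt"

func main() {
	// Monotonic clock readings (the m=+... offsets) from the tracker entry.
	firstStartedPulling := 2248.637970292 // s
	lastFinishedPulling := 2249.036310660 // s
	e2e := 2.953660593                    // podStartE2EDuration, s

	pull := lastFinishedPulling - firstStartedPulling
	fmt.Printf("image pull: %.9fs\n", pull)     // 0.398340368s
	fmt.Printf("SLO start:  %.9fs\n", e2e-pull) // 2.555320225s, as logged
}

The same arithmetic holds for the ssh-known-hosts pod at 16:02:18: 3.860500023s minus a 0.399510048s pull window gives the logged podStartSLOduration=3.460989975.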
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.545311 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l"
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.662971 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-ssh-key-openstack-edpm-ipam\") pod \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") "
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.663062 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-inventory\") pod \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") "
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.663097 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw97p\" (UniqueName: \"kubernetes.io/projected/b2f7ac87-e7ee-475a-8aea-91d2123e861d-kube-api-access-rw97p\") pod \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\" (UID: \"b2f7ac87-e7ee-475a-8aea-91d2123e861d\") "
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.668724 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2f7ac87-e7ee-475a-8aea-91d2123e861d-kube-api-access-rw97p" (OuterVolumeSpecName: "kube-api-access-rw97p") pod "b2f7ac87-e7ee-475a-8aea-91d2123e861d" (UID: "b2f7ac87-e7ee-475a-8aea-91d2123e861d"). InnerVolumeSpecName "kube-api-access-rw97p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.693512 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b2f7ac87-e7ee-475a-8aea-91d2123e861d" (UID: "b2f7ac87-e7ee-475a-8aea-91d2123e861d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.694517 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-inventory" (OuterVolumeSpecName: "inventory") pod "b2f7ac87-e7ee-475a-8aea-91d2123e861d" (UID: "b2f7ac87-e7ee-475a-8aea-91d2123e861d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.765299 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-inventory\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.765334 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rw97p\" (UniqueName: \"kubernetes.io/projected/b2f7ac87-e7ee-475a-8aea-91d2123e861d-kube-api-access-rw97p\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:40 crc kubenswrapper[3552]: I0320 16:02:40.765347 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b2f7ac87-e7ee-475a-8aea-91d2123e861d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.014116 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l" event={"ID":"b2f7ac87-e7ee-475a-8aea-91d2123e861d","Type":"ContainerDied","Data":"55179e08d78c3a432b26ede4318d2512363a51de5a5fbb024f16919b148b8988"}
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.014153 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55179e08d78c3a432b26ede4318d2512363a51de5a5fbb024f16919b148b8988"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.014156 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dt75l"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.110947 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"]
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.111114 3552 topology_manager.go:215] "Topology Admit Handler" podUID="0a5c936b-91f0-4c32-9f10-f232a07072f0" podNamespace="openstack" podName="reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: E0320 16:02:41.111357 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b2f7ac87-e7ee-475a-8aea-91d2123e861d" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.111375 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f7ac87-e7ee-475a-8aea-91d2123e861d" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.111598 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f7ac87-e7ee-475a-8aea-91d2123e861d" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.112202 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.115395 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.115989 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.116185 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.116243 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.135118 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"]
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.274897 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.274969 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.275098 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcnlp\" (UniqueName: \"kubernetes.io/projected/0a5c936b-91f0-4c32-9f10-f232a07072f0-kube-api-access-kcnlp\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.376863 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.376938 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.377046 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-kcnlp\" (UniqueName: \"kubernetes.io/projected/0a5c936b-91f0-4c32-9f10-f232a07072f0-kube-api-access-kcnlp\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.382261 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.406059 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.410002 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcnlp\" (UniqueName: \"kubernetes.io/projected/0a5c936b-91f0-4c32-9f10-f232a07072f0-kube-api-access-kcnlp\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:41 crc kubenswrapper[3552]: I0320 16:02:41.426612 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:42 crc kubenswrapper[3552]: I0320 16:02:42.077108 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"]
Mar 20 16:02:42 crc kubenswrapper[3552]: I0320 16:02:42.085205 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Mar 20 16:02:43 crc kubenswrapper[3552]: I0320 16:02:43.036067 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f" event={"ID":"0a5c936b-91f0-4c32-9f10-f232a07072f0","Type":"ContainerStarted","Data":"0ac4b62617063c35d9c2d01a0bad266eccc9aceb24b49cc8c97eb6c62d8ddc8f"}
Mar 20 16:02:43 crc kubenswrapper[3552]: I0320 16:02:43.036103 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f" event={"ID":"0a5c936b-91f0-4c32-9f10-f232a07072f0","Type":"ContainerStarted","Data":"f1306b3a733c014d7f28b31d25c6a69ee14a54e7234e183be25af0be4516ec45"}
Mar 20 16:02:43 crc kubenswrapper[3552]: I0320 16:02:43.263725 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c2dxq"
Mar 20 16:02:43 crc kubenswrapper[3552]: I0320 16:02:43.284286 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f" podStartSLOduration=1.915992305 podStartE2EDuration="2.284238693s" podCreationTimestamp="2026-03-20 16:02:41 +0000 UTC" firstStartedPulling="2026-03-20 16:02:42.081710732 +0000 UTC m=+2261.775407562" lastFinishedPulling="2026-03-20 16:02:42.44995712 +0000 UTC m=+2262.143653950" observedRunningTime="2026-03-20 16:02:43.062858635 +0000 UTC m=+2262.756555465" watchObservedRunningTime="2026-03-20 16:02:43.284238693 +0000 UTC m=+2262.977935523"
Mar 20 16:02:43 crc kubenswrapper[3552]: I0320 16:02:43.356959 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c2dxq"
Mar 20 16:02:43 crc kubenswrapper[3552]: I0320 16:02:43.398394 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c2dxq"]
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.049484 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c2dxq" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server" containerID="cri-o://8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1" gracePeriod=2
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.548811 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2dxq"
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.671585 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-catalog-content\") pod \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") "
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.671781 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkbpr\" (UniqueName: \"kubernetes.io/projected/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-kube-api-access-wkbpr\") pod \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") "
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.671871 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-utilities\") pod \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\" (UID: \"a92ff56a-5707-4b3e-9c15-29d30c9d5baa\") "
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.672634 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-utilities" (OuterVolumeSpecName: "utilities") pod "a92ff56a-5707-4b3e-9c15-29d30c9d5baa" (UID: "a92ff56a-5707-4b3e-9c15-29d30c9d5baa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.677929 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-kube-api-access-wkbpr" (OuterVolumeSpecName: "kube-api-access-wkbpr") pod "a92ff56a-5707-4b3e-9c15-29d30c9d5baa" (UID: "a92ff56a-5707-4b3e-9c15-29d30c9d5baa"). InnerVolumeSpecName "kube-api-access-wkbpr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.774107 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wkbpr\" (UniqueName: \"kubernetes.io/projected/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-kube-api-access-wkbpr\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:45 crc kubenswrapper[3552]: I0320 16:02:45.774165 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-utilities\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.058171 3552 generic.go:334] "Generic (PLEG): container finished" podID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerID="8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1" exitCode=0
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.058216 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerDied","Data":"8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1"}
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.058237 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2dxq" event={"ID":"a92ff56a-5707-4b3e-9c15-29d30c9d5baa","Type":"ContainerDied","Data":"854b20cb886785ca0e72cf36834451b90053039572018a0a3a3fb726addad370"}
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.058254 3552 scope.go:117] "RemoveContainer" containerID="8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.058363 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2dxq"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.152766 3552 scope.go:117] "RemoveContainer" containerID="5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.223690 3552 scope.go:117] "RemoveContainer" containerID="9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.260369 3552 scope.go:117] "RemoveContainer" containerID="8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1"
Mar 20 16:02:46 crc kubenswrapper[3552]: E0320 16:02:46.261070 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1\": container with ID starting with 8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1 not found: ID does not exist" containerID="8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.261115 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1"} err="failed to get container status \"8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1\": rpc error: code = NotFound desc = could not find container \"8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1\": container with ID starting with 8ecf97d0ae59b8a6766975fc48378adca73e1d1be2059b921267f6a086646dc1 not found: ID does not exist"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.261126 3552 scope.go:117] "RemoveContainer" containerID="5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40"
Mar 20 16:02:46 crc kubenswrapper[3552]: E0320 16:02:46.261378 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40\": container with ID starting with 5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40 not found: ID does not exist" containerID="5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.261418 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40"} err="failed to get container status \"5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40\": rpc error: code = NotFound desc = could not find container \"5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40\": container with ID starting with 5c4d5189d597547e2324c2ac1361165e2daef3c44d3d1a6ba29a0bee0d9acf40 not found: ID does not exist"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.261430 3552 scope.go:117] "RemoveContainer" containerID="9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a"
Mar 20 16:02:46 crc kubenswrapper[3552]: E0320 16:02:46.261657 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a\": container with ID starting with 9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a not found: ID does not exist" containerID="9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.261684 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a"} err="failed to get container status \"9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a\": rpc error: code = NotFound desc = could not find container \"9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a\": container with ID starting with 9b5681286e372391199399fe54093dbd40cc67f005f1fd2f4748d9eecb662d6a not found: ID does not exist"
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.643943 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a92ff56a-5707-4b3e-9c15-29d30c9d5baa" (UID: "a92ff56a-5707-4b3e-9c15-29d30c9d5baa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.691926 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c2dxq"]
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.704613 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ff56a-5707-4b3e-9c15-29d30c9d5baa-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:46 crc kubenswrapper[3552]: I0320 16:02:46.705805 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c2dxq"]
Mar 20 16:02:47 crc kubenswrapper[3552]: I0320 16:02:47.444580 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" path="/var/lib/kubelet/pods/a92ff56a-5707-4b3e-9c15-29d30c9d5baa/volumes"
Mar 20 16:02:53 crc kubenswrapper[3552]: I0320 16:02:53.109966 3552 generic.go:334] "Generic (PLEG): container finished" podID="0a5c936b-91f0-4c32-9f10-f232a07072f0" containerID="0ac4b62617063c35d9c2d01a0bad266eccc9aceb24b49cc8c97eb6c62d8ddc8f" exitCode=0
Mar 20 16:02:53 crc kubenswrapper[3552]: I0320 16:02:53.110154 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f" event={"ID":"0a5c936b-91f0-4c32-9f10-f232a07072f0","Type":"ContainerDied","Data":"0ac4b62617063c35d9c2d01a0bad266eccc9aceb24b49cc8c97eb6c62d8ddc8f"}
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.519814 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.658323 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcnlp\" (UniqueName: \"kubernetes.io/projected/0a5c936b-91f0-4c32-9f10-f232a07072f0-kube-api-access-kcnlp\") pod \"0a5c936b-91f0-4c32-9f10-f232a07072f0\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") "
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.658520 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-ssh-key-openstack-edpm-ipam\") pod \"0a5c936b-91f0-4c32-9f10-f232a07072f0\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") "
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.659330 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-inventory\") pod \"0a5c936b-91f0-4c32-9f10-f232a07072f0\" (UID: \"0a5c936b-91f0-4c32-9f10-f232a07072f0\") "
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.666275 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a5c936b-91f0-4c32-9f10-f232a07072f0-kube-api-access-kcnlp" (OuterVolumeSpecName: "kube-api-access-kcnlp") pod "0a5c936b-91f0-4c32-9f10-f232a07072f0" (UID: "0a5c936b-91f0-4c32-9f10-f232a07072f0"). InnerVolumeSpecName "kube-api-access-kcnlp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.684450 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0a5c936b-91f0-4c32-9f10-f232a07072f0" (UID: "0a5c936b-91f0-4c32-9f10-f232a07072f0"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.686220 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-inventory" (OuterVolumeSpecName: "inventory") pod "0a5c936b-91f0-4c32-9f10-f232a07072f0" (UID: "0a5c936b-91f0-4c32-9f10-f232a07072f0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.761993 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-kcnlp\" (UniqueName: \"kubernetes.io/projected/0a5c936b-91f0-4c32-9f10-f232a07072f0-kube-api-access-kcnlp\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.762046 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:54 crc kubenswrapper[3552]: I0320 16:02:54.762062 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a5c936b-91f0-4c32-9f10-f232a07072f0-inventory\") on node \"crc\" DevicePath \"\""
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.124521 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f" event={"ID":"0a5c936b-91f0-4c32-9f10-f232a07072f0","Type":"ContainerDied","Data":"f1306b3a733c014d7f28b31d25c6a69ee14a54e7234e183be25af0be4516ec45"}
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.124794 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1306b3a733c014d7f28b31d25c6a69ee14a54e7234e183be25af0be4516ec45"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.124571 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.271984 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"]
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272154 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7b22c393-016f-45a8-b806-d10bb8dc57fe" podNamespace="openstack" podName="install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: E0320 16:02:55.272419 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="extract-utilities"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272436 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="extract-utilities"
Mar 20 16:02:55 crc kubenswrapper[3552]: E0320 16:02:55.272447 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="0a5c936b-91f0-4c32-9f10-f232a07072f0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272455 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5c936b-91f0-4c32-9f10-f232a07072f0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:02:55 crc kubenswrapper[3552]: E0320 16:02:55.272467 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272474 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server"
Mar 20 16:02:55 crc kubenswrapper[3552]: E0320 16:02:55.272484 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="extract-content"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272493 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="extract-content"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272708 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a5c936b-91f0-4c32-9f10-f232a07072f0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.272732 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="a92ff56a-5707-4b3e-9c15-29d30c9d5baa" containerName="registry-server"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.273478 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.279863 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.279987 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.280030 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.280030 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.280584 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.280633 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.280760 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.288535 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.290595 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"]
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373606 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373674 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373725 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373755 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373796 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373845 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373904 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373942 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.373991 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"
Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.374047 3552 
reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.374086 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.374127 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.374157 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.374193 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2b2h\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-kube-api-access-v2b2h\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475665 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475738 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475783 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-neutron-metadata-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475814 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475853 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-v2b2h\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-kube-api-access-v2b2h\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475903 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475936 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.475980 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.476031 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.476077 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 
16:02:55.476131 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.476194 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.476227 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.476265 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.482931 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.483653 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.483997 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.484652 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.486378 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.486545 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.486569 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.486809 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.487796 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.488934 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.489523 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.489657 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.490243 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.501467 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2b2h\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-kube-api-access-v2b2h\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:55 crc kubenswrapper[3552]: I0320 16:02:55.591299 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:02:56 crc kubenswrapper[3552]: I0320 16:02:56.222463 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm"] Mar 20 16:02:57 crc kubenswrapper[3552]: I0320 16:02:57.139250 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" event={"ID":"7b22c393-016f-45a8-b806-d10bb8dc57fe","Type":"ContainerStarted","Data":"0137ec84b20c04b1b4055b63f169bf2a55fe7d9a08e0abf1be59b047cde86adf"} Mar 20 16:02:57 crc kubenswrapper[3552]: I0320 16:02:57.139667 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" event={"ID":"7b22c393-016f-45a8-b806-d10bb8dc57fe","Type":"ContainerStarted","Data":"75ba50efd91cdfb6aff8c82d114bb667617c89f9e8d9aee9c9524cf1e3914e23"} Mar 20 16:02:57 crc kubenswrapper[3552]: I0320 16:02:57.175628 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" podStartSLOduration=1.865569432 podStartE2EDuration="2.175577297s" podCreationTimestamp="2026-03-20 16:02:55 +0000 UTC" firstStartedPulling="2026-03-20 16:02:56.229970889 +0000 UTC m=+2275.923667719" lastFinishedPulling="2026-03-20 16:02:56.539978754 +0000 UTC m=+2276.233675584" observedRunningTime="2026-03-20 16:02:57.167878943 +0000 UTC m=+2276.861575793" watchObservedRunningTime="2026-03-20 16:02:57.175577297 +0000 UTC m=+2276.869274127" Mar 20 16:03:01 crc kubenswrapper[3552]: I0320 16:03:01.344370 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:03:01 crc kubenswrapper[3552]: I0320 16:03:01.344942 3552 kubelet_getters.go:187] "Pod status updated" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:03:01 crc kubenswrapper[3552]: I0320 16:03:01.344976 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:03:01 crc kubenswrapper[3552]: I0320 16:03:01.344998 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:03:01 crc kubenswrapper[3552]: I0320 16:03:01.345057 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:03:35 crc kubenswrapper[3552]: I0320 16:03:35.447806 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b22c393-016f-45a8-b806-d10bb8dc57fe" containerID="0137ec84b20c04b1b4055b63f169bf2a55fe7d9a08e0abf1be59b047cde86adf" exitCode=0 Mar 20 16:03:35 crc kubenswrapper[3552]: I0320 16:03:35.447937 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" event={"ID":"7b22c393-016f-45a8-b806-d10bb8dc57fe","Type":"ContainerDied","Data":"0137ec84b20c04b1b4055b63f169bf2a55fe7d9a08e0abf1be59b047cde86adf"} Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.893952 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962047 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-bootstrap-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962188 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2b2h\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-kube-api-access-v2b2h\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962238 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962270 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-neutron-metadata-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962338 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ovn-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962390 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume 
\"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962430 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-telemetry-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962459 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-repo-setup-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962497 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-nova-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962519 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962539 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ssh-key-openstack-edpm-ipam\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962582 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-ovn-default-certs-0\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962632 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-inventory\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.962654 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-libvirt-combined-ca-bundle\") pod \"7b22c393-016f-45a8-b806-d10bb8dc57fe\" (UID: \"7b22c393-016f-45a8-b806-d10bb8dc57fe\") " Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.969639 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.971066 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.972721 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.972885 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.973018 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.973632 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.975622 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.975667 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.979358 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.979679 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.983249 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-kube-api-access-v2b2h" (OuterVolumeSpecName: "kube-api-access-v2b2h") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "kube-api-access-v2b2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:03:36 crc kubenswrapper[3552]: I0320 16:03:36.987355 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.001579 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-inventory" (OuterVolumeSpecName: "inventory") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.006685 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7b22c393-016f-45a8-b806-d10bb8dc57fe" (UID: "7b22c393-016f-45a8-b806-d10bb8dc57fe"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064906 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-v2b2h\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-kube-api-access-v2b2h\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064935 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064950 3552 reconciler_common.go:300] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064961 3552 reconciler_common.go:300] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064975 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064988 3552 reconciler_common.go:300] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.064999 3552 reconciler_common.go:300] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065008 3552 reconciler_common.go:300] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065020 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065030 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065040 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7b22c393-016f-45a8-b806-d10bb8dc57fe-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065050 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065059 3552 reconciler_common.go:300] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.065070 3552 reconciler_common.go:300] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b22c393-016f-45a8-b806-d10bb8dc57fe-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.467324 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" event={"ID":"7b22c393-016f-45a8-b806-d10bb8dc57fe","Type":"ContainerDied","Data":"75ba50efd91cdfb6aff8c82d114bb667617c89f9e8d9aee9c9524cf1e3914e23"} Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.467506 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75ba50efd91cdfb6aff8c82d114bb667617c89f9e8d9aee9c9524cf1e3914e23" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.467635 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.593525 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6"] Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.593758 3552 topology_manager.go:215] "Topology Admit Handler" podUID="149bba52-4065-4344-845f-1fb933e5833c" podNamespace="openstack" podName="ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: E0320 16:03:37.594081 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b22c393-016f-45a8-b806-d10bb8dc57fe" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.594116 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b22c393-016f-45a8-b806-d10bb8dc57fe" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.594389 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b22c393-016f-45a8-b806-d10bb8dc57fe" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.595781 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.601695 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6"] Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.639962 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.640022 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.640182 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.640302 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.640632 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.686521 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.686808 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.686859 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.686931 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xhkt\" (UniqueName: \"kubernetes.io/projected/149bba52-4065-4344-845f-1fb933e5833c-kube-api-access-7xhkt\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.687086 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/149bba52-4065-4344-845f-1fb933e5833c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.788559 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for 
volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/149bba52-4065-4344-845f-1fb933e5833c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.788624 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.788727 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.788749 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.788783 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-7xhkt\" (UniqueName: \"kubernetes.io/projected/149bba52-4065-4344-845f-1fb933e5833c-kube-api-access-7xhkt\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.790007 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/149bba52-4065-4344-845f-1fb933e5833c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.794752 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.799296 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.799432 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.817954 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xhkt\" (UniqueName: \"kubernetes.io/projected/149bba52-4065-4344-845f-1fb933e5833c-kube-api-access-7xhkt\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-d2md6\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:37 crc kubenswrapper[3552]: I0320 16:03:37.979097 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:03:38 crc kubenswrapper[3552]: I0320 16:03:38.552289 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6"] Mar 20 16:03:39 crc kubenswrapper[3552]: I0320 16:03:39.484729 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" event={"ID":"149bba52-4065-4344-845f-1fb933e5833c","Type":"ContainerStarted","Data":"6c19b4c8bfc9513a5f462e0fb7dcfcb2c7ea2a0d22a64763d7fe72b0f5a06c2f"} Mar 20 16:03:40 crc kubenswrapper[3552]: I0320 16:03:40.496657 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" event={"ID":"149bba52-4065-4344-845f-1fb933e5833c","Type":"ContainerStarted","Data":"e7faf3559674d8780f0012d969d2703ced8602a123d84ea9873c4adb58e97be7"} Mar 20 16:03:40 crc kubenswrapper[3552]: I0320 16:03:40.525734 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" podStartSLOduration=2.474931023 podStartE2EDuration="3.525658647s" podCreationTimestamp="2026-03-20 16:03:37 +0000 UTC" firstStartedPulling="2026-03-20 16:03:38.557248474 +0000 UTC m=+2318.250945304" lastFinishedPulling="2026-03-20 16:03:39.607976098 +0000 UTC m=+2319.301672928" observedRunningTime="2026-03-20 16:03:40.518294072 +0000 UTC m=+2320.211990922" watchObservedRunningTime="2026-03-20 16:03:40.525658647 +0000 UTC m=+2320.219355497" Mar 20 16:04:01 crc kubenswrapper[3552]: I0320 16:04:01.345709 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:04:01 crc kubenswrapper[3552]: I0320 16:04:01.346370 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:04:01 crc kubenswrapper[3552]: I0320 16:04:01.346428 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:04:01 crc kubenswrapper[3552]: I0320 16:04:01.346459 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:04:01 crc kubenswrapper[3552]: I0320 16:04:01.346600 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:04:12 crc kubenswrapper[3552]: I0320 16:04:12.778570 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:04:12 crc kubenswrapper[3552]: I0320 16:04:12.779124 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:04:41 crc kubenswrapper[3552]: I0320 16:04:41.981860 3552 generic.go:334] "Generic (PLEG): container finished" podID="149bba52-4065-4344-845f-1fb933e5833c" containerID="e7faf3559674d8780f0012d969d2703ced8602a123d84ea9873c4adb58e97be7" exitCode=0 Mar 20 16:04:41 crc kubenswrapper[3552]: I0320 16:04:41.982091 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" event={"ID":"149bba52-4065-4344-845f-1fb933e5833c","Type":"ContainerDied","Data":"e7faf3559674d8780f0012d969d2703ced8602a123d84ea9873c4adb58e97be7"} Mar 20 16:04:42 crc kubenswrapper[3552]: I0320 16:04:42.778354 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:04:42 crc kubenswrapper[3552]: I0320 16:04:42.778461 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.358539 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.486730 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ssh-key-openstack-edpm-ipam\") pod \"149bba52-4065-4344-845f-1fb933e5833c\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.487018 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ovn-combined-ca-bundle\") pod \"149bba52-4065-4344-845f-1fb933e5833c\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.487105 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/149bba52-4065-4344-845f-1fb933e5833c-ovncontroller-config-0\") pod \"149bba52-4065-4344-845f-1fb933e5833c\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.487129 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-inventory\") pod \"149bba52-4065-4344-845f-1fb933e5833c\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.487170 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xhkt\" (UniqueName: \"kubernetes.io/projected/149bba52-4065-4344-845f-1fb933e5833c-kube-api-access-7xhkt\") pod \"149bba52-4065-4344-845f-1fb933e5833c\" (UID: \"149bba52-4065-4344-845f-1fb933e5833c\") " Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.494004 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/149bba52-4065-4344-845f-1fb933e5833c-kube-api-access-7xhkt" (OuterVolumeSpecName: "kube-api-access-7xhkt") pod "149bba52-4065-4344-845f-1fb933e5833c" (UID: "149bba52-4065-4344-845f-1fb933e5833c"). InnerVolumeSpecName "kube-api-access-7xhkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.494318 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "149bba52-4065-4344-845f-1fb933e5833c" (UID: "149bba52-4065-4344-845f-1fb933e5833c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.518749 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149bba52-4065-4344-845f-1fb933e5833c-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "149bba52-4065-4344-845f-1fb933e5833c" (UID: "149bba52-4065-4344-845f-1fb933e5833c"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.522455 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-inventory" (OuterVolumeSpecName: "inventory") pod "149bba52-4065-4344-845f-1fb933e5833c" (UID: "149bba52-4065-4344-845f-1fb933e5833c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.522563 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "149bba52-4065-4344-845f-1fb933e5833c" (UID: "149bba52-4065-4344-845f-1fb933e5833c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.589881 3552 reconciler_common.go:300] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.589915 3552 reconciler_common.go:300] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/149bba52-4065-4344-845f-1fb933e5833c-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.589927 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.589936 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-7xhkt\" (UniqueName: \"kubernetes.io/projected/149bba52-4065-4344-845f-1fb933e5833c-kube-api-access-7xhkt\") on node \"crc\" DevicePath \"\"" Mar 20 16:04:43 crc kubenswrapper[3552]: I0320 16:04:43.589947 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/149bba52-4065-4344-845f-1fb933e5833c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.015903 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" event={"ID":"149bba52-4065-4344-845f-1fb933e5833c","Type":"ContainerDied","Data":"6c19b4c8bfc9513a5f462e0fb7dcfcb2c7ea2a0d22a64763d7fe72b0f5a06c2f"} Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.015935 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c19b4c8bfc9513a5f462e0fb7dcfcb2c7ea2a0d22a64763d7fe72b0f5a06c2f" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.015982 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-d2md6" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.115806 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c"] Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.116085 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7ad3b2bc-4451-4440-b295-0be9dbcc2892" podNamespace="openstack" podName="neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: E0320 16:04:44.116629 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="149bba52-4065-4344-845f-1fb933e5833c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.116657 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="149bba52-4065-4344-845f-1fb933e5833c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.117003 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="149bba52-4065-4344-845f-1fb933e5833c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.118029 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.120914 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.121194 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.122053 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.122959 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.123168 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.123352 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.131002 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c"] Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.309463 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.309563 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: 
\"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.309677 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4rdf\" (UniqueName: \"kubernetes.io/projected/7ad3b2bc-4451-4440-b295-0be9dbcc2892-kube-api-access-j4rdf\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.309798 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.309866 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.310211 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.411593 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.411687 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.411749 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: 
I0320 16:04:44.411819 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-j4rdf\" (UniqueName: \"kubernetes.io/projected/7ad3b2bc-4451-4440-b295-0be9dbcc2892-kube-api-access-j4rdf\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.411887 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.411949 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.417585 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.417625 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.420429 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.421242 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.423223 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-ovn-metadata-agent-neutron-config-0\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.432345 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4rdf\" (UniqueName: \"kubernetes.io/projected/7ad3b2bc-4451-4440-b295-0be9dbcc2892-kube-api-access-j4rdf\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:44 crc kubenswrapper[3552]: I0320 16:04:44.440457 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:04:45 crc kubenswrapper[3552]: I0320 16:04:45.060187 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c"] Mar 20 16:04:45 crc kubenswrapper[3552]: W0320 16:04:45.064604 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ad3b2bc_4451_4440_b295_0be9dbcc2892.slice/crio-c84abaa1f7c694acecdb9b73c5eaf5e4079fb7929d8c14b42cb4bf302c100e01 WatchSource:0}: Error finding container c84abaa1f7c694acecdb9b73c5eaf5e4079fb7929d8c14b42cb4bf302c100e01: Status 404 returned error can't find the container with id c84abaa1f7c694acecdb9b73c5eaf5e4079fb7929d8c14b42cb4bf302c100e01 Mar 20 16:04:46 crc kubenswrapper[3552]: I0320 16:04:46.031297 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" event={"ID":"7ad3b2bc-4451-4440-b295-0be9dbcc2892","Type":"ContainerStarted","Data":"d474a054f2bcbaf5b3cd1cc65e0310a8768b41ca69a4fc54bec252dc66c9e350"} Mar 20 16:04:46 crc kubenswrapper[3552]: I0320 16:04:46.031832 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" event={"ID":"7ad3b2bc-4451-4440-b295-0be9dbcc2892","Type":"ContainerStarted","Data":"c84abaa1f7c694acecdb9b73c5eaf5e4079fb7929d8c14b42cb4bf302c100e01"} Mar 20 16:04:46 crc kubenswrapper[3552]: I0320 16:04:46.065241 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" podStartSLOduration=1.7625578960000001 podStartE2EDuration="2.065167355s" podCreationTimestamp="2026-03-20 16:04:44 +0000 UTC" firstStartedPulling="2026-03-20 16:04:45.066215104 +0000 UTC m=+2384.759911934" lastFinishedPulling="2026-03-20 16:04:45.368824563 +0000 UTC m=+2385.062521393" observedRunningTime="2026-03-20 16:04:46.058618781 +0000 UTC m=+2385.752315641" watchObservedRunningTime="2026-03-20 16:04:46.065167355 +0000 UTC m=+2385.758864205" Mar 20 16:05:01 crc kubenswrapper[3552]: I0320 16:05:01.347309 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:05:01 crc kubenswrapper[3552]: I0320 16:05:01.347869 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:05:01 crc kubenswrapper[3552]: I0320 16:05:01.347900 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:05:01 crc 
Mar 20 16:05:01 crc kubenswrapper[3552]: I0320 16:05:01.347946 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:05:01 crc kubenswrapper[3552]: I0320 16:05:01.347987 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:05:12 crc kubenswrapper[3552]: I0320 16:05:12.778365 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 16:05:12 crc kubenswrapper[3552]: I0320 16:05:12.779017 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 16:05:12 crc kubenswrapper[3552]: I0320 16:05:12.779055 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 16:05:12 crc kubenswrapper[3552]: I0320 16:05:12.780076 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Mar 20 16:05:12 crc kubenswrapper[3552]: I0320 16:05:12.780292 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" gracePeriod=600
Mar 20 16:05:12 crc kubenswrapper[3552]: E0320 16:05:12.868135 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:05:13 crc kubenswrapper[3552]: I0320 16:05:13.241510 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" exitCode=0
Mar 20 16:05:13 crc kubenswrapper[3552]: I0320 16:05:13.241582 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617"}
Mar 20 16:05:13 crc kubenswrapper[3552]: I0320 16:05:13.241851 3552 scope.go:117] "RemoveContainer" containerID="f0b09ae54bea2ff083e11bf57e9ad0b3e8c74d921c18b9a3d158e51bbcd5f477"
Mar 20 16:05:13 crc kubenswrapper[3552]: I0320 16:05:13.242660 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617"
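
Once the liveness probe kills the container (gracePeriod=600 above) and it has already crashed before, pod_workers refuses an immediate restart: CrashLoopBackOff with "back-off 5m0s". The kubelet doubles the restart delay per consecutive crash up to a cap, which the repeated 5m0s messages in the entries that follow have clearly reached. A sketch of that policy (the 10s initial delay is an assumption about kubelet internals; the 5m cap is taken from the log):

package main

import (
	"fmt"
	"time"
)

// crashBackoff doubles the restart delay per consecutive failure up to a
// cap, mirroring the "back-off 5m0s restarting failed container" messages.
func crashBackoff(consecutiveFailures int) time.Duration {
	const (
		initial  = 10 * time.Second // assumed starting delay
		maxDelay = 5 * time.Minute  // the "5m0s" cap seen in the log
	)
	d := initial
	for i := 1; i < consecutiveFailures; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("failure %d -> back-off %s\n", n, crashBackoff(n))
	}
	// From the sixth consecutive failure on, the delay stays pinned at
	// 5m0s, which is why the same message repeats for minutes below.
}
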
Mar 20 16:05:13 crc kubenswrapper[3552]: E0320 16:05:13.243293 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:05:25 crc kubenswrapper[3552]: I0320 16:05:25.432774 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617"
Mar 20 16:05:25 crc kubenswrapper[3552]: E0320 16:05:25.435096 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:05:32 crc kubenswrapper[3552]: I0320 16:05:32.440085 3552 generic.go:334] "Generic (PLEG): container finished" podID="7ad3b2bc-4451-4440-b295-0be9dbcc2892" containerID="d474a054f2bcbaf5b3cd1cc65e0310a8768b41ca69a4fc54bec252dc66c9e350" exitCode=0
Mar 20 16:05:32 crc kubenswrapper[3552]: I0320 16:05:32.440196 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" event={"ID":"7ad3b2bc-4451-4440-b295-0be9dbcc2892","Type":"ContainerDied","Data":"d474a054f2bcbaf5b3cd1cc65e0310a8768b41ca69a4fc54bec252dc66c9e350"}
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.857259 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c"
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.897722 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-ssh-key-openstack-edpm-ipam\") pod \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") "
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.897826 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-metadata-combined-ca-bundle\") pod \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") "
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.897868 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-inventory\") pod \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") "
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.898020 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-ovn-metadata-agent-neutron-config-0\") pod \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") "
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.898040 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-nova-metadata-neutron-config-0\") pod \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") "
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.898120 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4rdf\" (UniqueName: \"kubernetes.io/projected/7ad3b2bc-4451-4440-b295-0be9dbcc2892-kube-api-access-j4rdf\") pod \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\" (UID: \"7ad3b2bc-4451-4440-b295-0be9dbcc2892\") "
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.903483 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "7ad3b2bc-4451-4440-b295-0be9dbcc2892" (UID: "7ad3b2bc-4451-4440-b295-0be9dbcc2892"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.903651 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ad3b2bc-4451-4440-b295-0be9dbcc2892-kube-api-access-j4rdf" (OuterVolumeSpecName: "kube-api-access-j4rdf") pod "7ad3b2bc-4451-4440-b295-0be9dbcc2892" (UID: "7ad3b2bc-4451-4440-b295-0be9dbcc2892"). InnerVolumeSpecName "kube-api-access-j4rdf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.923670 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7ad3b2bc-4451-4440-b295-0be9dbcc2892" (UID: "7ad3b2bc-4451-4440-b295-0be9dbcc2892"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.925633 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-inventory" (OuterVolumeSpecName: "inventory") pod "7ad3b2bc-4451-4440-b295-0be9dbcc2892" (UID: "7ad3b2bc-4451-4440-b295-0be9dbcc2892"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.925955 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "7ad3b2bc-4451-4440-b295-0be9dbcc2892" (UID: "7ad3b2bc-4451-4440-b295-0be9dbcc2892"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.926396 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "7ad3b2bc-4451-4440-b295-0be9dbcc2892" (UID: "7ad3b2bc-4451-4440-b295-0be9dbcc2892"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.999643 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-inventory\") on node \"crc\" DevicePath \"\""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.999867 3552 reconciler_common.go:300] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.999883 3552 reconciler_common.go:300] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.999894 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-j4rdf\" (UniqueName: \"kubernetes.io/projected/7ad3b2bc-4451-4440-b295-0be9dbcc2892-kube-api-access-j4rdf\") on node \"crc\" DevicePath \"\""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.999907 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Mar 20 16:05:33 crc kubenswrapper[3552]: I0320 16:05:33.999917 3552 reconciler_common.go:300] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad3b2bc-4451-4440-b295-0be9dbcc2892-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.462073 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" event={"ID":"7ad3b2bc-4451-4440-b295-0be9dbcc2892","Type":"ContainerDied","Data":"c84abaa1f7c694acecdb9b73c5eaf5e4079fb7929d8c14b42cb4bf302c100e01"}
Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.462118 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c"
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.462144 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c84abaa1f7c694acecdb9b73c5eaf5e4079fb7929d8c14b42cb4bf302c100e01" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.640692 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26"] Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.640930 3552 topology_manager.go:215] "Topology Admit Handler" podUID="763b4ba4-050a-48e5-a22e-eb51ceaec61b" podNamespace="openstack" podName="libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: E0320 16:05:34.641302 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7ad3b2bc-4451-4440-b295-0be9dbcc2892" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.641320 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ad3b2bc-4451-4440-b295-0be9dbcc2892" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.641630 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ad3b2bc-4451-4440-b295-0be9dbcc2892" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.642448 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.646746 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.646747 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.647573 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.647861 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.652322 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.679419 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26"] Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.719000 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.719253 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tf4gp\" (UniqueName: \"kubernetes.io/projected/763b4ba4-050a-48e5-a22e-eb51ceaec61b-kube-api-access-tf4gp\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: 
\"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.719332 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.719436 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.719534 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.821594 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-tf4gp\" (UniqueName: \"kubernetes.io/projected/763b4ba4-050a-48e5-a22e-eb51ceaec61b-kube-api-access-tf4gp\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.821666 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.821708 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.821767 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.821859 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" 
(UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.827080 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.827112 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.827219 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.827542 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.841744 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf4gp\" (UniqueName: \"kubernetes.io/projected/763b4ba4-050a-48e5-a22e-eb51ceaec61b-kube-api-access-tf4gp\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-sgj26\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:34 crc kubenswrapper[3552]: I0320 16:05:34.972109 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:05:35 crc kubenswrapper[3552]: I0320 16:05:35.524906 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26"] Mar 20 16:05:35 crc kubenswrapper[3552]: W0320 16:05:35.526245 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod763b4ba4_050a_48e5_a22e_eb51ceaec61b.slice/crio-3ec2df49c30826551afbd234967724ff0b3a6ef68f99b1d8317f4c501dc29fde WatchSource:0}: Error finding container 3ec2df49c30826551afbd234967724ff0b3a6ef68f99b1d8317f4c501dc29fde: Status 404 returned error can't find the container with id 3ec2df49c30826551afbd234967724ff0b3a6ef68f99b1d8317f4c501dc29fde Mar 20 16:05:36 crc kubenswrapper[3552]: I0320 16:05:36.483012 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" event={"ID":"763b4ba4-050a-48e5-a22e-eb51ceaec61b","Type":"ContainerStarted","Data":"21ae791b8e039b5e48f7b30a403a811d108c19abd7c8df685cb64b89e4b9bc14"} Mar 20 16:05:36 crc kubenswrapper[3552]: I0320 16:05:36.483830 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" event={"ID":"763b4ba4-050a-48e5-a22e-eb51ceaec61b","Type":"ContainerStarted","Data":"3ec2df49c30826551afbd234967724ff0b3a6ef68f99b1d8317f4c501dc29fde"} Mar 20 16:05:36 crc kubenswrapper[3552]: I0320 16:05:36.514764 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" podStartSLOduration=2.246799568 podStartE2EDuration="2.514705306s" podCreationTimestamp="2026-03-20 16:05:34 +0000 UTC" firstStartedPulling="2026-03-20 16:05:35.528681668 +0000 UTC m=+2435.222378518" lastFinishedPulling="2026-03-20 16:05:35.796587426 +0000 UTC m=+2435.490284256" observedRunningTime="2026-03-20 16:05:36.508876992 +0000 UTC m=+2436.202573832" watchObservedRunningTime="2026-03-20 16:05:36.514705306 +0000 UTC m=+2436.208402146" Mar 20 16:05:39 crc kubenswrapper[3552]: I0320 16:05:39.431494 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:05:39 crc kubenswrapper[3552]: E0320 16:05:39.432438 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:05:54 crc kubenswrapper[3552]: I0320 16:05:54.430698 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:05:54 crc kubenswrapper[3552]: E0320 16:05:54.432991 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:06:01 crc kubenswrapper[3552]: I0320 16:06:01.348603 3552 
kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:06:01 crc kubenswrapper[3552]: I0320 16:06:01.349105 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:06:01 crc kubenswrapper[3552]: I0320 16:06:01.349131 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:06:01 crc kubenswrapper[3552]: I0320 16:06:01.349154 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:06:01 crc kubenswrapper[3552]: I0320 16:06:01.349173 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:06:06 crc kubenswrapper[3552]: I0320 16:06:06.431291 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:06:06 crc kubenswrapper[3552]: E0320 16:06:06.433335 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:06:18 crc kubenswrapper[3552]: I0320 16:06:18.430804 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:06:18 crc kubenswrapper[3552]: E0320 16:06:18.431884 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:06:33 crc kubenswrapper[3552]: I0320 16:06:33.430275 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:06:33 crc kubenswrapper[3552]: E0320 16:06:33.431170 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:06:44 crc kubenswrapper[3552]: I0320 16:06:44.430650 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:06:44 crc kubenswrapper[3552]: E0320 16:06:44.431680 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 
16:06:58 crc kubenswrapper[3552]: I0320 16:06:58.430876 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:06:58 crc kubenswrapper[3552]: E0320 16:06:58.431931 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:07:01 crc kubenswrapper[3552]: I0320 16:07:01.350207 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:07:01 crc kubenswrapper[3552]: I0320 16:07:01.350568 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:07:01 crc kubenswrapper[3552]: I0320 16:07:01.350593 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:07:01 crc kubenswrapper[3552]: I0320 16:07:01.350634 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:07:01 crc kubenswrapper[3552]: I0320 16:07:01.350655 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:07:09 crc kubenswrapper[3552]: I0320 16:07:09.431693 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:07:09 crc kubenswrapper[3552]: E0320 16:07:09.432673 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:07:23 crc kubenswrapper[3552]: I0320 16:07:23.430939 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:07:23 crc kubenswrapper[3552]: E0320 16:07:23.432902 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:07:35 crc kubenswrapper[3552]: I0320 16:07:35.430907 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:07:35 crc kubenswrapper[3552]: E0320 16:07:35.431906 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:07:50 crc kubenswrapper[3552]: I0320 16:07:50.430978 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:07:50 crc kubenswrapper[3552]: E0320 16:07:50.432221 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:08:01 crc kubenswrapper[3552]: I0320 16:08:01.350874 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:08:01 crc kubenswrapper[3552]: I0320 16:08:01.351696 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:08:01 crc kubenswrapper[3552]: I0320 16:08:01.351736 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:08:01 crc kubenswrapper[3552]: I0320 16:08:01.351777 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:08:01 crc kubenswrapper[3552]: I0320 16:08:01.351815 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:08:01 crc kubenswrapper[3552]: I0320 16:08:01.435621 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:08:01 crc kubenswrapper[3552]: E0320 16:08:01.436170 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:08:12 crc kubenswrapper[3552]: I0320 16:08:12.431220 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:08:12 crc kubenswrapper[3552]: E0320 16:08:12.432424 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:08:27 crc kubenswrapper[3552]: I0320 16:08:27.431077 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:08:27 crc kubenswrapper[3552]: E0320 16:08:27.432447 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:08:40 crc kubenswrapper[3552]: I0320 16:08:40.430340 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:08:40 crc kubenswrapper[3552]: E0320 16:08:40.431660 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:08:53 crc kubenswrapper[3552]: I0320 16:08:53.433100 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:08:53 crc kubenswrapper[3552]: E0320 16:08:53.434360 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:09:01 crc kubenswrapper[3552]: I0320 16:09:01.352495 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:09:01 crc kubenswrapper[3552]: I0320 16:09:01.353073 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:09:01 crc kubenswrapper[3552]: I0320 16:09:01.353097 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:09:01 crc kubenswrapper[3552]: I0320 16:09:01.353124 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:09:01 crc kubenswrapper[3552]: I0320 16:09:01.353146 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:09:07 crc kubenswrapper[3552]: I0320 16:09:07.431000 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:09:07 crc kubenswrapper[3552]: E0320 16:09:07.432238 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:09:20 crc kubenswrapper[3552]: I0320 16:09:20.432256 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:09:20 crc kubenswrapper[3552]: E0320 16:09:20.434296 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:09:29 crc kubenswrapper[3552]: I0320 16:09:29.969101 3552 generic.go:334] "Generic (PLEG): container finished" podID="763b4ba4-050a-48e5-a22e-eb51ceaec61b" containerID="21ae791b8e039b5e48f7b30a403a811d108c19abd7c8df685cb64b89e4b9bc14" exitCode=0 Mar 20 16:09:29 crc kubenswrapper[3552]: I0320 16:09:29.969317 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" event={"ID":"763b4ba4-050a-48e5-a22e-eb51ceaec61b","Type":"ContainerDied","Data":"21ae791b8e039b5e48f7b30a403a811d108c19abd7c8df685cb64b89e4b9bc14"} Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.437545 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:09:31 crc kubenswrapper[3552]: E0320 16:09:31.438727 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.446270 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.640149 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-combined-ca-bundle\") pod \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.640366 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-inventory\") pod \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.640471 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-secret-0\") pod \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.640546 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tf4gp\" (UniqueName: \"kubernetes.io/projected/763b4ba4-050a-48e5-a22e-eb51ceaec61b-kube-api-access-tf4gp\") pod \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\" (UID: \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.640643 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-ssh-key-openstack-edpm-ipam\") pod \"763b4ba4-050a-48e5-a22e-eb51ceaec61b\" (UID: 
\"763b4ba4-050a-48e5-a22e-eb51ceaec61b\") " Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.651821 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "763b4ba4-050a-48e5-a22e-eb51ceaec61b" (UID: "763b4ba4-050a-48e5-a22e-eb51ceaec61b"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.651942 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/763b4ba4-050a-48e5-a22e-eb51ceaec61b-kube-api-access-tf4gp" (OuterVolumeSpecName: "kube-api-access-tf4gp") pod "763b4ba4-050a-48e5-a22e-eb51ceaec61b" (UID: "763b4ba4-050a-48e5-a22e-eb51ceaec61b"). InnerVolumeSpecName "kube-api-access-tf4gp". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.668161 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "763b4ba4-050a-48e5-a22e-eb51ceaec61b" (UID: "763b4ba4-050a-48e5-a22e-eb51ceaec61b"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.674054 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "763b4ba4-050a-48e5-a22e-eb51ceaec61b" (UID: "763b4ba4-050a-48e5-a22e-eb51ceaec61b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.675382 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-inventory" (OuterVolumeSpecName: "inventory") pod "763b4ba4-050a-48e5-a22e-eb51ceaec61b" (UID: "763b4ba4-050a-48e5-a22e-eb51ceaec61b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.743078 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.743113 3552 reconciler_common.go:300] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.743126 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.743136 3552 reconciler_common.go:300] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/763b4ba4-050a-48e5-a22e-eb51ceaec61b-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.743146 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-tf4gp\" (UniqueName: \"kubernetes.io/projected/763b4ba4-050a-48e5-a22e-eb51ceaec61b-kube-api-access-tf4gp\") on node \"crc\" DevicePath \"\"" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.991584 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" event={"ID":"763b4ba4-050a-48e5-a22e-eb51ceaec61b","Type":"ContainerDied","Data":"3ec2df49c30826551afbd234967724ff0b3a6ef68f99b1d8317f4c501dc29fde"} Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.991615 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ec2df49c30826551afbd234967724ff0b3a6ef68f99b1d8317f4c501dc29fde" Mar 20 16:09:31 crc kubenswrapper[3552]: I0320 16:09:31.991663 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-sgj26" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.129565 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp"] Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.134656 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7e975c55-e5ef-4c8c-b6dc-1af5da847c65" podNamespace="openstack" podName="nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: E0320 16:09:32.134979 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="763b4ba4-050a-48e5-a22e-eb51ceaec61b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.134998 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="763b4ba4-050a-48e5-a22e-eb51ceaec61b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.135228 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="763b4ba4-050a-48e5-a22e-eb51ceaec61b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.136004 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.141368 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.141737 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.142875 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.143145 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.143285 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.143477 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.144378 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.149608 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp"] Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253319 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253395 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253462 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253506 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253535 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82twx\" (UniqueName: 
\"kubernetes.io/projected/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-kube-api-access-82twx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253576 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-2\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253622 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253661 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253731 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253765 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.253807 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-3\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-3\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.355282 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.355488 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.355587 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-82twx\" (UniqueName: \"kubernetes.io/projected/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-kube-api-access-82twx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.355743 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-2\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.355876 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.356034 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.356347 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.356487 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.356957 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-3\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-3\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.357182 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for 
volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.357348 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.360117 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.360379 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-2\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.360550 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.360934 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.362128 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.363095 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.363356 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.365165 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-3\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-3\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.365493 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.366160 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.391482 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-82twx\" (UniqueName: \"kubernetes.io/projected/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-kube-api-access-82twx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-mhhbp\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:32 crc kubenswrapper[3552]: I0320 16:09:32.526787 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" Mar 20 16:09:33 crc kubenswrapper[3552]: I0320 16:09:33.154674 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp"] Mar 20 16:09:33 crc kubenswrapper[3552]: W0320 16:09:33.160614 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e975c55_e5ef_4c8c_b6dc_1af5da847c65.slice/crio-237f554ce3f503ba8f510b6504b917f43981915ed02f1f7ce9606486ccb4497f WatchSource:0}: Error finding container 237f554ce3f503ba8f510b6504b917f43981915ed02f1f7ce9606486ccb4497f: Status 404 returned error can't find the container with id 237f554ce3f503ba8f510b6504b917f43981915ed02f1f7ce9606486ccb4497f Mar 20 16:09:33 crc kubenswrapper[3552]: I0320 16:09:33.163506 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 16:09:34 crc kubenswrapper[3552]: I0320 16:09:34.014503 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" event={"ID":"7e975c55-e5ef-4c8c-b6dc-1af5da847c65","Type":"ContainerStarted","Data":"fb93271315c83036c16642da6c73d20fe8330507d001b594b27c483d4c64fb36"} Mar 20 16:09:34 crc kubenswrapper[3552]: I0320 16:09:34.014844 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" event={"ID":"7e975c55-e5ef-4c8c-b6dc-1af5da847c65","Type":"ContainerStarted","Data":"237f554ce3f503ba8f510b6504b917f43981915ed02f1f7ce9606486ccb4497f"} Mar 20 16:09:34 crc kubenswrapper[3552]: I0320 16:09:34.038842 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" podStartSLOduration=1.626365158 podStartE2EDuration="2.038780867s" podCreationTimestamp="2026-03-20 16:09:32 +0000 UTC" firstStartedPulling="2026-03-20 16:09:33.163204875 +0000 UTC m=+2672.856901715" lastFinishedPulling="2026-03-20 16:09:33.575620594 +0000 UTC m=+2673.269317424" observedRunningTime="2026-03-20 16:09:34.033099507 +0000 UTC m=+2673.726796347" watchObservedRunningTime="2026-03-20 16:09:34.038780867 +0000 UTC m=+2673.732477697" Mar 20 16:09:43 crc kubenswrapper[3552]: I0320 16:09:43.430761 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:09:43 crc kubenswrapper[3552]: E0320 16:09:43.431791 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:09:58 crc kubenswrapper[3552]: I0320 16:09:58.431701 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:09:58 crc kubenswrapper[3552]: E0320 16:09:58.433150 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:10:01 crc kubenswrapper[3552]: I0320 16:10:01.354151 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:10:01 crc kubenswrapper[3552]: I0320 16:10:01.355738 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:10:01 crc kubenswrapper[3552]: I0320 16:10:01.355839 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:10:01 crc kubenswrapper[3552]: I0320 16:10:01.356016 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:10:01 crc kubenswrapper[3552]: I0320 16:10:01.356121 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.455958 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fjn2f"] Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.457229 3552 topology_manager.go:215] "Topology Admit Handler" podUID="d28b96cb-e725-46ee-955a-7499f1043075" podNamespace="openshift-marketplace" podName="certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.460287 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.467264 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fjn2f"] Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.571589 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-utilities\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.571713 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-catalog-content\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.571845 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-747ww\" (UniqueName: \"kubernetes.io/projected/d28b96cb-e725-46ee-955a-7499f1043075-kube-api-access-747ww\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.673133 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-catalog-content\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.673577 3552 reconciler_common.go:231] 
"operationExecutor.MountVolume started for volume \"kube-api-access-747ww\" (UniqueName: \"kubernetes.io/projected/d28b96cb-e725-46ee-955a-7499f1043075-kube-api-access-747ww\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.673725 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-catalog-content\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.674094 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-utilities\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.674589 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-utilities\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.692229 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-747ww\" (UniqueName: \"kubernetes.io/projected/d28b96cb-e725-46ee-955a-7499f1043075-kube-api-access-747ww\") pod \"certified-operators-fjn2f\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:02 crc kubenswrapper[3552]: I0320 16:10:02.787784 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:03 crc kubenswrapper[3552]: W0320 16:10:03.295104 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd28b96cb_e725_46ee_955a_7499f1043075.slice/crio-89ed2660a6f81b80130ee9324eb1c6b29b61e7ac4ae42a5aa5d147248ab2ee5e WatchSource:0}: Error finding container 89ed2660a6f81b80130ee9324eb1c6b29b61e7ac4ae42a5aa5d147248ab2ee5e: Status 404 returned error can't find the container with id 89ed2660a6f81b80130ee9324eb1c6b29b61e7ac4ae42a5aa5d147248ab2ee5e Mar 20 16:10:03 crc kubenswrapper[3552]: I0320 16:10:03.297289 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fjn2f"] Mar 20 16:10:04 crc kubenswrapper[3552]: I0320 16:10:04.257150 3552 generic.go:334] "Generic (PLEG): container finished" podID="d28b96cb-e725-46ee-955a-7499f1043075" containerID="4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521" exitCode=0 Mar 20 16:10:04 crc kubenswrapper[3552]: I0320 16:10:04.257201 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerDied","Data":"4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521"} Mar 20 16:10:04 crc kubenswrapper[3552]: I0320 16:10:04.258095 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerStarted","Data":"89ed2660a6f81b80130ee9324eb1c6b29b61e7ac4ae42a5aa5d147248ab2ee5e"} Mar 20 16:10:05 crc kubenswrapper[3552]: I0320 16:10:05.267482 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerStarted","Data":"e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63"} Mar 20 16:10:10 crc kubenswrapper[3552]: I0320 16:10:10.432120 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:10:10 crc kubenswrapper[3552]: E0320 16:10:10.433090 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:10:13 crc kubenswrapper[3552]: I0320 16:10:13.343353 3552 generic.go:334] "Generic (PLEG): container finished" podID="d28b96cb-e725-46ee-955a-7499f1043075" containerID="e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63" exitCode=0 Mar 20 16:10:13 crc kubenswrapper[3552]: I0320 16:10:13.343568 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerDied","Data":"e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63"} Mar 20 16:10:14 crc kubenswrapper[3552]: I0320 16:10:14.354059 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" 
event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerStarted","Data":"079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5"} Mar 20 16:10:14 crc kubenswrapper[3552]: I0320 16:10:14.396476 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fjn2f" podStartSLOduration=3.000660166 podStartE2EDuration="12.396432702s" podCreationTimestamp="2026-03-20 16:10:02 +0000 UTC" firstStartedPulling="2026-03-20 16:10:04.259554427 +0000 UTC m=+2703.953251257" lastFinishedPulling="2026-03-20 16:10:13.655326953 +0000 UTC m=+2713.349023793" observedRunningTime="2026-03-20 16:10:14.388153632 +0000 UTC m=+2714.081850472" watchObservedRunningTime="2026-03-20 16:10:14.396432702 +0000 UTC m=+2714.090129532" Mar 20 16:10:22 crc kubenswrapper[3552]: I0320 16:10:22.788241 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:22 crc kubenswrapper[3552]: I0320 16:10:22.788906 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:22 crc kubenswrapper[3552]: I0320 16:10:22.877751 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:23 crc kubenswrapper[3552]: I0320 16:10:23.498274 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:23 crc kubenswrapper[3552]: I0320 16:10:23.560326 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fjn2f"] Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.430511 3552 scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.440125 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fjn2f" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="registry-server" containerID="cri-o://079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5" gracePeriod=2 Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.869017 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.937096 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-747ww\" (UniqueName: \"kubernetes.io/projected/d28b96cb-e725-46ee-955a-7499f1043075-kube-api-access-747ww\") pod \"d28b96cb-e725-46ee-955a-7499f1043075\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.937234 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-utilities\") pod \"d28b96cb-e725-46ee-955a-7499f1043075\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.937264 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-catalog-content\") pod \"d28b96cb-e725-46ee-955a-7499f1043075\" (UID: \"d28b96cb-e725-46ee-955a-7499f1043075\") " Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.938909 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-utilities" (OuterVolumeSpecName: "utilities") pod "d28b96cb-e725-46ee-955a-7499f1043075" (UID: "d28b96cb-e725-46ee-955a-7499f1043075"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.944248 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d28b96cb-e725-46ee-955a-7499f1043075-kube-api-access-747ww" (OuterVolumeSpecName: "kube-api-access-747ww") pod "d28b96cb-e725-46ee-955a-7499f1043075" (UID: "d28b96cb-e725-46ee-955a-7499f1043075"). InnerVolumeSpecName "kube-api-access-747ww". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.958217 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-747ww\" (UniqueName: \"kubernetes.io/projected/d28b96cb-e725-46ee-955a-7499f1043075-kube-api-access-747ww\") on node \"crc\" DevicePath \"\"" Mar 20 16:10:25 crc kubenswrapper[3552]: I0320 16:10:25.958263 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.088478 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mk8k7"] Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.092873 3552 topology_manager.go:215] "Topology Admit Handler" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" podNamespace="openshift-marketplace" podName="community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: E0320 16:10:26.093259 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="extract-content" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.093353 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="extract-content" Mar 20 16:10:26 crc kubenswrapper[3552]: E0320 16:10:26.093574 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="registry-server" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.093587 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="registry-server" Mar 20 16:10:26 crc kubenswrapper[3552]: E0320 16:10:26.093598 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="extract-utilities" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.093604 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="extract-utilities" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.093812 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="d28b96cb-e725-46ee-955a-7499f1043075" containerName="registry-server" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.095435 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mk8k7"] Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.096058 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.162021 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-utilities\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.162125 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwp4f\" (UniqueName: \"kubernetes.io/projected/edefda3a-fa4d-4ef4-b49a-95251f91bb92-kube-api-access-fwp4f\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.162182 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-catalog-content\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.256574 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d28b96cb-e725-46ee-955a-7499f1043075" (UID: "d28b96cb-e725-46ee-955a-7499f1043075"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.263594 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-catalog-content\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.263848 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-utilities\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.264038 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fwp4f\" (UniqueName: \"kubernetes.io/projected/edefda3a-fa4d-4ef4-b49a-95251f91bb92-kube-api-access-fwp4f\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.264206 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d28b96cb-e725-46ee-955a-7499f1043075-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.264244 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-catalog-content\") pod \"community-operators-mk8k7\" (UID: 
\"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.264585 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-utilities\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.281771 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwp4f\" (UniqueName: \"kubernetes.io/projected/edefda3a-fa4d-4ef4-b49a-95251f91bb92-kube-api-access-fwp4f\") pod \"community-operators-mk8k7\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.420986 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.452367 3552 generic.go:334] "Generic (PLEG): container finished" podID="d28b96cb-e725-46ee-955a-7499f1043075" containerID="079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5" exitCode=0 Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.452442 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fjn2f" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.452466 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerDied","Data":"079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5"} Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.452488 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fjn2f" event={"ID":"d28b96cb-e725-46ee-955a-7499f1043075","Type":"ContainerDied","Data":"89ed2660a6f81b80130ee9324eb1c6b29b61e7ac4ae42a5aa5d147248ab2ee5e"} Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.452506 3552 scope.go:117] "RemoveContainer" containerID="079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.460869 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"e680d2bfd56125cc2701591706ccb54da162fa49508def9218ede80dc902e95b"} Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.526693 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fjn2f"] Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.528444 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fjn2f"] Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.532882 3552 scope.go:117] "RemoveContainer" containerID="e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.624076 3552 scope.go:117] "RemoveContainer" containerID="4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.669629 3552 scope.go:117] "RemoveContainer" 
containerID="079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5" Mar 20 16:10:26 crc kubenswrapper[3552]: E0320 16:10:26.670830 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5\": container with ID starting with 079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5 not found: ID does not exist" containerID="079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.670889 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5"} err="failed to get container status \"079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5\": rpc error: code = NotFound desc = could not find container \"079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5\": container with ID starting with 079d70c0c6754e37570f726ba569734563e9be94cfb21518a94bcb6803dc02f5 not found: ID does not exist" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.670908 3552 scope.go:117] "RemoveContainer" containerID="e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63" Mar 20 16:10:26 crc kubenswrapper[3552]: E0320 16:10:26.671321 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63\": container with ID starting with e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63 not found: ID does not exist" containerID="e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.671351 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63"} err="failed to get container status \"e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63\": rpc error: code = NotFound desc = could not find container \"e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63\": container with ID starting with e1d7294398846155dd40238f4f61d465a4e8fb5a5c2876e647e74d48c680fb63 not found: ID does not exist" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.671363 3552 scope.go:117] "RemoveContainer" containerID="4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521" Mar 20 16:10:26 crc kubenswrapper[3552]: E0320 16:10:26.671711 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521\": container with ID starting with 4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521 not found: ID does not exist" containerID="4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521" Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.671748 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521"} err="failed to get container status \"4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521\": rpc error: code = NotFound desc = could not find container \"4a9dc418e4e364651a399b359e4ea812d7a22f5b4ac308902721ee37c6bbd521\": container with ID starting with 
Mar 20 16:10:26 crc kubenswrapper[3552]: I0320 16:10:26.947734 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mk8k7"]
Mar 20 16:10:27 crc kubenswrapper[3552]: I0320 16:10:27.441114 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d28b96cb-e725-46ee-955a-7499f1043075" path="/var/lib/kubelet/pods/d28b96cb-e725-46ee-955a-7499f1043075/volumes"
Mar 20 16:10:27 crc kubenswrapper[3552]: I0320 16:10:27.470004 3552 generic.go:334] "Generic (PLEG): container finished" podID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerID="bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530" exitCode=0
Mar 20 16:10:27 crc kubenswrapper[3552]: I0320 16:10:27.470052 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerDied","Data":"bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530"}
Mar 20 16:10:27 crc kubenswrapper[3552]: I0320 16:10:27.470075 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerStarted","Data":"16ed8f5da071ef303a903e8789c372b3054f6a9e2f034bfb61d135c7ec0d75ae"}
Mar 20 16:10:28 crc kubenswrapper[3552]: I0320 16:10:28.481902 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerStarted","Data":"f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b"}
Mar 20 16:10:37 crc kubenswrapper[3552]: I0320 16:10:37.559664 3552 generic.go:334] "Generic (PLEG): container finished" podID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerID="f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b" exitCode=0
Mar 20 16:10:37 crc kubenswrapper[3552]: I0320 16:10:37.559763 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerDied","Data":"f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b"}
Mar 20 16:10:39 crc kubenswrapper[3552]: I0320 16:10:39.577176 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerStarted","Data":"b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334"}
Mar 20 16:10:39 crc kubenswrapper[3552]: I0320 16:10:39.610225 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mk8k7" podStartSLOduration=3.11964961 podStartE2EDuration="13.610143894s" podCreationTimestamp="2026-03-20 16:10:26 +0000 UTC" firstStartedPulling="2026-03-20 16:10:27.471810123 +0000 UTC m=+2727.165506963" lastFinishedPulling="2026-03-20 16:10:37.962304417 +0000 UTC m=+2737.656001247" observedRunningTime="2026-03-20 16:10:39.598995768 +0000 UTC m=+2739.292692598" watchObservedRunningTime="2026-03-20 16:10:39.610143894 +0000 UTC m=+2739.303840724"
Mar 20 16:10:46 crc kubenswrapper[3552]: I0320 16:10:46.421990 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mk8k7"
Mar 20 16:10:46 crc kubenswrapper[3552]: I0320 16:10:46.422748 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mk8k7"
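The pod_startup_latency_tracker record above encodes a consistent relationship: podStartE2EDuration equals watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to subtract the image-pull window (lastFinishedPulling minus firstStartedPulling) from that. Checking the numbers copied from this record (arithmetic only, timestamps truncated to microseconds):

```python
from datetime import datetime

fmt = "%Y-%m-%d %H:%M:%S.%f"
created       = datetime.strptime("2026-03-20 16:10:26.000000", fmt)
first_pull    = datetime.strptime("2026-03-20 16:10:27.471810", fmt)
last_pull     = datetime.strptime("2026-03-20 16:10:37.962304", fmt)
watch_running = datetime.strptime("2026-03-20 16:10:39.610143", fmt)

# ~13.61s, matches podStartE2EDuration="13.610143894s"
e2e = (watch_running - created).total_seconds()
# ~3.12s, matches podStartSLOduration=3.11964961
slo = e2e - (last_pull - first_pull).total_seconds()
print(f"E2E={e2e:.2f}s SLO={slo:.2f}s")
```

So of the ~13.6s this catalog pod took to come up, ~10.5s was spent pulling images and only ~3.1s counts against the startup SLO.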
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:46 crc kubenswrapper[3552]: I0320 16:10:46.550054 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:46 crc kubenswrapper[3552]: I0320 16:10:46.740292 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:46 crc kubenswrapper[3552]: I0320 16:10:46.789475 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mk8k7"] Mar 20 16:10:48 crc kubenswrapper[3552]: I0320 16:10:48.647244 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mk8k7" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="registry-server" containerID="cri-o://b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334" gracePeriod=2 Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.044756 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.210599 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwp4f\" (UniqueName: \"kubernetes.io/projected/edefda3a-fa4d-4ef4-b49a-95251f91bb92-kube-api-access-fwp4f\") pod \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.210767 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-utilities\") pod \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.210795 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-catalog-content\") pod \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\" (UID: \"edefda3a-fa4d-4ef4-b49a-95251f91bb92\") " Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.211683 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-utilities" (OuterVolumeSpecName: "utilities") pod "edefda3a-fa4d-4ef4-b49a-95251f91bb92" (UID: "edefda3a-fa4d-4ef4-b49a-95251f91bb92"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.218015 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edefda3a-fa4d-4ef4-b49a-95251f91bb92-kube-api-access-fwp4f" (OuterVolumeSpecName: "kube-api-access-fwp4f") pod "edefda3a-fa4d-4ef4-b49a-95251f91bb92" (UID: "edefda3a-fa4d-4ef4-b49a-95251f91bb92"). InnerVolumeSpecName "kube-api-access-fwp4f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.313313 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-fwp4f\" (UniqueName: \"kubernetes.io/projected/edefda3a-fa4d-4ef4-b49a-95251f91bb92-kube-api-access-fwp4f\") on node \"crc\" DevicePath \"\"" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.313360 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.658105 3552 generic.go:334] "Generic (PLEG): container finished" podID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerID="b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334" exitCode=0 Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.658166 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mk8k7" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.658206 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerDied","Data":"b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334"} Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.658620 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mk8k7" event={"ID":"edefda3a-fa4d-4ef4-b49a-95251f91bb92","Type":"ContainerDied","Data":"16ed8f5da071ef303a903e8789c372b3054f6a9e2f034bfb61d135c7ec0d75ae"} Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.658648 3552 scope.go:117] "RemoveContainer" containerID="b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.693479 3552 scope.go:117] "RemoveContainer" containerID="f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.751933 3552 scope.go:117] "RemoveContainer" containerID="bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.818330 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "edefda3a-fa4d-4ef4-b49a-95251f91bb92" (UID: "edefda3a-fa4d-4ef4-b49a-95251f91bb92"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.818510 3552 scope.go:117] "RemoveContainer" containerID="b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334" Mar 20 16:10:49 crc kubenswrapper[3552]: E0320 16:10:49.819078 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334\": container with ID starting with b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334 not found: ID does not exist" containerID="b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.819174 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334"} err="failed to get container status \"b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334\": rpc error: code = NotFound desc = could not find container \"b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334\": container with ID starting with b5c622de1575f2d51c9aef6bfe51be308f5a7eb840142a2c6ca372e524c24334 not found: ID does not exist" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.819194 3552 scope.go:117] "RemoveContainer" containerID="f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b" Mar 20 16:10:49 crc kubenswrapper[3552]: E0320 16:10:49.819646 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b\": container with ID starting with f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b not found: ID does not exist" containerID="f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.819689 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b"} err="failed to get container status \"f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b\": rpc error: code = NotFound desc = could not find container \"f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b\": container with ID starting with f5a4739661d6935ba7ebf56db64b4e6310d1eb1bd2064a9d47571cb9d9ff988b not found: ID does not exist" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.819705 3552 scope.go:117] "RemoveContainer" containerID="bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530" Mar 20 16:10:49 crc kubenswrapper[3552]: E0320 16:10:49.819968 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530\": container with ID starting with bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530 not found: ID does not exist" containerID="bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.820013 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530"} err="failed to get container status \"bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530\": rpc 
error: code = NotFound desc = could not find container \"bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530\": container with ID starting with bd5b5757a385e03de8315a1f8a9443dfb06ee521aa1a854339a55d6073c14530 not found: ID does not exist" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.823228 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edefda3a-fa4d-4ef4-b49a-95251f91bb92-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:10:49 crc kubenswrapper[3552]: I0320 16:10:49.994122 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mk8k7"] Mar 20 16:10:50 crc kubenswrapper[3552]: I0320 16:10:50.008939 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mk8k7"] Mar 20 16:10:51 crc kubenswrapper[3552]: I0320 16:10:51.449235 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" path="/var/lib/kubelet/pods/edefda3a-fa4d-4ef4-b49a-95251f91bb92/volumes" Mar 20 16:11:01 crc kubenswrapper[3552]: I0320 16:11:01.356452 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:11:01 crc kubenswrapper[3552]: I0320 16:11:01.356974 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:11:01 crc kubenswrapper[3552]: I0320 16:11:01.357031 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:11:01 crc kubenswrapper[3552]: I0320 16:11:01.357060 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:11:01 crc kubenswrapper[3552]: I0320 16:11:01.357085 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:12:01 crc kubenswrapper[3552]: I0320 16:12:01.357732 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:12:01 crc kubenswrapper[3552]: I0320 16:12:01.359037 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:12:01 crc kubenswrapper[3552]: I0320 16:12:01.359129 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:12:01 crc kubenswrapper[3552]: I0320 16:12:01.359227 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:12:01 crc kubenswrapper[3552]: I0320 16:12:01.359324 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:12:05 crc kubenswrapper[3552]: I0320 16:12:05.298148 3552 generic.go:334] "Generic (PLEG): container finished" podID="7e975c55-e5ef-4c8c-b6dc-1af5da847c65" containerID="fb93271315c83036c16642da6c73d20fe8330507d001b594b27c483d4c64fb36" exitCode=0 Mar 20 16:12:05 crc kubenswrapper[3552]: I0320 16:12:05.298227 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" event={"ID":"7e975c55-e5ef-4c8c-b6dc-1af5da847c65","Type":"ContainerDied","Data":"fb93271315c83036c16642da6c73d20fe8330507d001b594b27c483d4c64fb36"} Mar 
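Alongside the once-a-minute "Pod status updated" heartbeat for the static control-plane pods, the window above also shows the API-side lifecycle markers for a pod: "SyncLoop DELETE" → "SyncLoop REMOVE" → "Cleaned up orphaned pod volumes dir". A sketch that assembles a per-pod timeline from these markers (a hypothetical triage helper under the same one-record-per-line assumption):

```python
import re
import sys
from collections import defaultdict

TS = re.compile(r'^(\w{3} \d+ \d{2}:\d{2}:\d{2})')
MARKERS = {
    '"SyncLoop ADD"': "added",
    '"SyncLoop DELETE"': "delete requested",
    '"SyncLoop REMOVE"': "removed from api",
    '"Cleaned up orphaned pod volumes dir"': "volumes dir cleaned",
}
POD = re.compile(r'pods=\["([^"]+)"\]|pod(?:UID)?="([^"]+)"')

timeline = defaultdict(list)
for line in sys.stdin:
    ts = TS.match(line)
    for needle, label in MARKERS.items():
        if needle in line:
            m = POD.search(line)
            who = next((g for g in m.groups() if g), "?") if m else "?"
            timeline[who].append((ts.group(1) if ts else "?", label))

for pod, events in timeline.items():
    print(pod)
    for when, label in events:
        print(f"  {when}  {label}")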
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.781946 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp"
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.855674 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-ssh-key-openstack-edpm-ipam\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856229 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-1\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856258 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-combined-ca-bundle\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856392 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-2\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-2\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856480 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82twx\" (UniqueName: \"kubernetes.io/projected/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-kube-api-access-82twx\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856589 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-1\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856631 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-3\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-3\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.856916 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-extra-config-0\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.857034 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-inventory\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.857080 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-0\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.857155 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-0\") pod \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\" (UID: \"7e975c55-e5ef-4c8c-b6dc-1af5da847c65\") "
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.863858 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-kube-api-access-82twx" (OuterVolumeSpecName: "kube-api-access-82twx") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "kube-api-access-82twx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.882237 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-3" (OuterVolumeSpecName: "nova-cell1-compute-config-3") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-cell1-compute-config-3". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.887558 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.889190 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.891242 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.891660 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-2" (OuterVolumeSpecName: "nova-cell1-compute-config-2") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-cell1-compute-config-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.893798 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.893983 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-inventory" (OuterVolumeSpecName: "inventory") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.901620 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.905072 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.908262 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "7e975c55-e5ef-4c8c-b6dc-1af5da847c65" (UID: "7e975c55-e5ef-4c8c-b6dc-1af5da847c65"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959489 3552 reconciler_common.go:300] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959529 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959540 3552 reconciler_common.go:300] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959551 3552 reconciler_common.go:300] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959563 3552 reconciler_common.go:300] "Volume detached for volume \"nova-cell1-compute-config-2\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-2\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959573 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-82twx\" (UniqueName: \"kubernetes.io/projected/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-kube-api-access-82twx\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959582 3552 reconciler_common.go:300] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959592 3552 reconciler_common.go:300] "Volume detached for volume \"nova-cell1-compute-config-3\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-cell1-compute-config-3\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959602 3552 reconciler_common.go:300] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959612 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:06 crc kubenswrapper[3552]: I0320 16:12:06.959644 3552 reconciler_common.go:300] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7e975c55-e5ef-4c8c-b6dc-1af5da847c65-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.314616 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp" event={"ID":"7e975c55-e5ef-4c8c-b6dc-1af5da847c65","Type":"ContainerDied","Data":"237f554ce3f503ba8f510b6504b917f43981915ed02f1f7ce9606486ccb4497f"} Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.314645 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-mhhbp"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.314645 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="237f554ce3f503ba8f510b6504b917f43981915ed02f1f7ce9606486ccb4497f"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.423368 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9"]
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.423542 3552 topology_manager.go:215] "Topology Admit Handler" podUID="6cfc2f22-9946-4e63-9255-8e174a5bcb2f" podNamespace="openstack" podName="telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9"
Mar 20 16:12:07 crc kubenswrapper[3552]: E0320 16:12:07.423817 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="registry-server"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.423834 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="registry-server"
Mar 20 16:12:07 crc kubenswrapper[3552]: E0320 16:12:07.423861 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="extract-utilities"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.423869 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="extract-utilities"
Mar 20 16:12:07 crc kubenswrapper[3552]: E0320 16:12:07.423884 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7e975c55-e5ef-4c8c-b6dc-1af5da847c65" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.423893 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e975c55-e5ef-4c8c-b6dc-1af5da847c65" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:12:07 crc kubenswrapper[3552]: E0320 16:12:07.423907 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="extract-content"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.423913 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="extract-content"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.424075 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="edefda3a-fa4d-4ef4-b49a-95251f91bb92" containerName="registry-server"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.424102 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e975c55-e5ef-4c8c-b6dc-1af5da847c65" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.424661 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9"
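The E-level cpu_manager/memory_manager "RemoveStaleState" lines fire while the new telemetry pod is admitted: the resource managers drop per-container state left behind by the pods just deleted (the marketplace registry pod and the nova EDPM job), so they are expected here. The severity letter is the first character of the klog prefix, which makes level-based filtering mechanical. A sketch of a klog-prefix parser, with the format inferred from the records above:

```python
import re

# klog prefix, e.g.: E0320 16:12:07.423817 3552 cpu_manager.go:396] "RemoveStaleState..."
KLOG = re.compile(
    r'(?P<level>[IWEF])(?P<mmdd>\d{4}) (?P<time>\d{2}:\d{2}:\d{2}\.\d{6})\s+'
    r'(?P<pid>\d+) (?P<src>[\w.]+:\d+)\] (?P<msg>.*)'
)

def parse(line):
    """Return the klog fields of a record, or None if no prefix is found."""
    m = KLOG.search(line)
    return m.groupdict() if m else None

rec = parse('Mar 20 16:12:07 crc kubenswrapper[3552]: E0320 16:12:07.423817 3552 '
            'cpu_manager.go:396] "RemoveStaleState: removing container"')
assert rec and rec["level"] == "E" and rec["src"] == "cpu_manager.go:396"
```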
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.427361 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.427538 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-7h7sk" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.430526 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.430660 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.430799 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.445165 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9"] Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.584634 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.585055 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.585120 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.585264 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5kms\" (UniqueName: \"kubernetes.io/projected/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-kube-api-access-k5kms\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.585305 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 
crc kubenswrapper[3552]: I0320 16:12:07.585340 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.586121 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688207 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688281 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688309 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688342 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688393 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-k5kms\" (UniqueName: \"kubernetes.io/projected/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-kube-api-access-k5kms\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688504 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" 
(UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.688526 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.693063 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.693299 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.694442 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.694714 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.694843 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.695713 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.709054 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5kms\" (UniqueName: \"kubernetes.io/projected/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-kube-api-access-k5kms\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:07 crc kubenswrapper[3552]: I0320 16:12:07.746754 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:12:08 crc kubenswrapper[3552]: I0320 16:12:08.258127 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9"] Mar 20 16:12:08 crc kubenswrapper[3552]: I0320 16:12:08.324572 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" event={"ID":"6cfc2f22-9946-4e63-9255-8e174a5bcb2f","Type":"ContainerStarted","Data":"ac384f52f97368220e42a25415c53fac6ad2f05a364fae2038ba84309d6759b4"} Mar 20 16:12:09 crc kubenswrapper[3552]: I0320 16:12:09.333755 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" event={"ID":"6cfc2f22-9946-4e63-9255-8e174a5bcb2f","Type":"ContainerStarted","Data":"d942658359b01deea45779e2a3ab55dbc1f24690a6bcb31dcb70823ccbfe79d5"} Mar 20 16:12:09 crc kubenswrapper[3552]: I0320 16:12:09.361358 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" podStartSLOduration=1.985671884 podStartE2EDuration="2.361304869s" podCreationTimestamp="2026-03-20 16:12:07 +0000 UTC" firstStartedPulling="2026-03-20 16:12:08.270121577 +0000 UTC m=+2827.963818407" lastFinishedPulling="2026-03-20 16:12:08.645754562 +0000 UTC m=+2828.339451392" observedRunningTime="2026-03-20 16:12:09.353147222 +0000 UTC m=+2829.046844072" watchObservedRunningTime="2026-03-20 16:12:09.361304869 +0000 UTC m=+2829.055001699" Mar 20 16:12:38 crc kubenswrapper[3552]: I0320 16:12:38.997297 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-275vl"] Mar 20 16:12:38 crc kubenswrapper[3552]: I0320 16:12:38.998104 3552 topology_manager.go:215] "Topology Admit Handler" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" podNamespace="openshift-marketplace" podName="redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.000654 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.041793 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-275vl"] Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.050187 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-catalog-content\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.050334 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-utilities\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.050538 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77nbf\" (UniqueName: \"kubernetes.io/projected/fd1cee1e-31e8-4640-8f16-aad80a331110-kube-api-access-77nbf\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.153161 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-catalog-content\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.153233 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-utilities\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.153310 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-77nbf\" (UniqueName: \"kubernetes.io/projected/fd1cee1e-31e8-4640-8f16-aad80a331110-kube-api-access-77nbf\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.153636 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-catalog-content\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.153698 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-utilities\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.185236 3552 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-77nbf\" (UniqueName: \"kubernetes.io/projected/fd1cee1e-31e8-4640-8f16-aad80a331110-kube-api-access-77nbf\") pod \"redhat-marketplace-275vl\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.406854 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:39 crc kubenswrapper[3552]: I0320 16:12:39.886216 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-275vl"] Mar 20 16:12:40 crc kubenswrapper[3552]: I0320 16:12:40.599598 3552 generic.go:334] "Generic (PLEG): container finished" podID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerID="a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc" exitCode=0 Mar 20 16:12:40 crc kubenswrapper[3552]: I0320 16:12:40.599873 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerDied","Data":"a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc"} Mar 20 16:12:40 crc kubenswrapper[3552]: I0320 16:12:40.599892 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerStarted","Data":"0cf24f675b0b63724fbb2e3a4235814261c4bf03c8c290b9065729035b25b118"} Mar 20 16:12:41 crc kubenswrapper[3552]: I0320 16:12:41.610103 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerStarted","Data":"2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8"} Mar 20 16:12:42 crc kubenswrapper[3552]: I0320 16:12:42.778547 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:12:42 crc kubenswrapper[3552]: I0320 16:12:42.778878 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:12:43 crc kubenswrapper[3552]: E0320 16:12:43.950251 3552 cadvisor_stats_provider.go:501] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd1cee1e_31e8_4640_8f16_aad80a331110.slice/crio-conmon-2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8.scope\": RecentStats: unable to find data in memory cache]" Mar 20 16:12:44 crc kubenswrapper[3552]: I0320 16:12:44.634497 3552 generic.go:334] "Generic (PLEG): container finished" podID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerID="2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8" exitCode=0 Mar 20 16:12:44 crc kubenswrapper[3552]: I0320 16:12:44.634538 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" 
event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerDied","Data":"2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8"} Mar 20 16:12:45 crc kubenswrapper[3552]: I0320 16:12:45.643093 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerStarted","Data":"484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48"} Mar 20 16:12:49 crc kubenswrapper[3552]: I0320 16:12:49.407291 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:49 crc kubenswrapper[3552]: I0320 16:12:49.407844 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:49 crc kubenswrapper[3552]: I0320 16:12:49.523071 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:49 crc kubenswrapper[3552]: I0320 16:12:49.543508 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-275vl" podStartSLOduration=7.245067781 podStartE2EDuration="11.543467509s" podCreationTimestamp="2026-03-20 16:12:38 +0000 UTC" firstStartedPulling="2026-03-20 16:12:40.601633914 +0000 UTC m=+2860.295330744" lastFinishedPulling="2026-03-20 16:12:44.900033642 +0000 UTC m=+2864.593730472" observedRunningTime="2026-03-20 16:12:45.670909914 +0000 UTC m=+2865.364606764" watchObservedRunningTime="2026-03-20 16:12:49.543467509 +0000 UTC m=+2869.237164339" Mar 20 16:12:59 crc kubenswrapper[3552]: I0320 16:12:59.514266 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:12:59 crc kubenswrapper[3552]: I0320 16:12:59.571853 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-275vl"] Mar 20 16:12:59 crc kubenswrapper[3552]: I0320 16:12:59.777471 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-275vl" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="registry-server" containerID="cri-o://484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48" gracePeriod=2 Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.208356 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.324190 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-utilities\") pod \"fd1cee1e-31e8-4640-8f16-aad80a331110\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.324265 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77nbf\" (UniqueName: \"kubernetes.io/projected/fd1cee1e-31e8-4640-8f16-aad80a331110-kube-api-access-77nbf\") pod \"fd1cee1e-31e8-4640-8f16-aad80a331110\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.324350 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-catalog-content\") pod \"fd1cee1e-31e8-4640-8f16-aad80a331110\" (UID: \"fd1cee1e-31e8-4640-8f16-aad80a331110\") " Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.325089 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-utilities" (OuterVolumeSpecName: "utilities") pod "fd1cee1e-31e8-4640-8f16-aad80a331110" (UID: "fd1cee1e-31e8-4640-8f16-aad80a331110"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.330666 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd1cee1e-31e8-4640-8f16-aad80a331110-kube-api-access-77nbf" (OuterVolumeSpecName: "kube-api-access-77nbf") pod "fd1cee1e-31e8-4640-8f16-aad80a331110" (UID: "fd1cee1e-31e8-4640-8f16-aad80a331110"). InnerVolumeSpecName "kube-api-access-77nbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.426725 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-77nbf\" (UniqueName: \"kubernetes.io/projected/fd1cee1e-31e8-4640-8f16-aad80a331110-kube-api-access-77nbf\") on node \"crc\" DevicePath \"\"" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.426761 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.474640 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd1cee1e-31e8-4640-8f16-aad80a331110" (UID: "fd1cee1e-31e8-4640-8f16-aad80a331110"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.528376 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1cee1e-31e8-4640-8f16-aad80a331110-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.786184 3552 generic.go:334] "Generic (PLEG): container finished" podID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerID="484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48" exitCode=0 Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.786227 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerDied","Data":"484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48"} Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.786250 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-275vl" event={"ID":"fd1cee1e-31e8-4640-8f16-aad80a331110","Type":"ContainerDied","Data":"0cf24f675b0b63724fbb2e3a4235814261c4bf03c8c290b9065729035b25b118"} Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.786268 3552 scope.go:117] "RemoveContainer" containerID="484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.787110 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-275vl" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.826737 3552 scope.go:117] "RemoveContainer" containerID="2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.856005 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-275vl"] Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.874654 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-275vl"] Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.884520 3552 scope.go:117] "RemoveContainer" containerID="a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.941905 3552 scope.go:117] "RemoveContainer" containerID="484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48" Mar 20 16:13:00 crc kubenswrapper[3552]: E0320 16:13:00.942468 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48\": container with ID starting with 484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48 not found: ID does not exist" containerID="484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.942534 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48"} err="failed to get container status \"484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48\": rpc error: code = NotFound desc = could not find container \"484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48\": container with ID starting with 484e267c5a30c4121ab3b1752ea4100ae09a70c69073a60f00d4897ae332ec48 not found: ID does not exist" Mar 
20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.942556 3552 scope.go:117] "RemoveContainer" containerID="2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8" Mar 20 16:13:00 crc kubenswrapper[3552]: E0320 16:13:00.943017 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8\": container with ID starting with 2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8 not found: ID does not exist" containerID="2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.943099 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8"} err="failed to get container status \"2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8\": rpc error: code = NotFound desc = could not find container \"2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8\": container with ID starting with 2f08af67e4c6b8c0a7ba6374a33fa06ee7dcd1aa00c4b318146d063d36e1ccc8 not found: ID does not exist" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.943118 3552 scope.go:117] "RemoveContainer" containerID="a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc" Mar 20 16:13:00 crc kubenswrapper[3552]: E0320 16:13:00.943484 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc\": container with ID starting with a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc not found: ID does not exist" containerID="a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc" Mar 20 16:13:00 crc kubenswrapper[3552]: I0320 16:13:00.943524 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc"} err="failed to get container status \"a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc\": rpc error: code = NotFound desc = could not find container \"a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc\": container with ID starting with a03834d3b8e68b947b613870727e91bb26a91ce9d47fe51ab17f967b3b04a1bc not found: ID does not exist" Mar 20 16:13:01 crc kubenswrapper[3552]: I0320 16:13:01.413615 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:13:01 crc kubenswrapper[3552]: I0320 16:13:01.413760 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:13:01 crc kubenswrapper[3552]: I0320 16:13:01.413813 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:13:01 crc kubenswrapper[3552]: I0320 16:13:01.413856 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:13:01 crc kubenswrapper[3552]: I0320 16:13:01.413890 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:13:01 crc kubenswrapper[3552]: I0320 16:13:01.441535 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" path="/var/lib/kubelet/pods/fd1cee1e-31e8-4640-8f16-aad80a331110/volumes" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.880169 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dx6qm"] Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.880854 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" podNamespace="openshift-marketplace" podName="redhat-operators-dx6qm" Mar 20 16:13:06 crc kubenswrapper[3552]: E0320 16:13:06.881201 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="extract-content" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.881215 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="extract-content" Mar 20 16:13:06 crc kubenswrapper[3552]: E0320 16:13:06.881226 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="registry-server" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.881235 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="registry-server" Mar 20 16:13:06 crc kubenswrapper[3552]: E0320 16:13:06.881259 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="extract-utilities" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.881265 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="extract-utilities" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.881563 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd1cee1e-31e8-4640-8f16-aad80a331110" containerName="registry-server" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.883289 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.893856 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dx6qm"] Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.984142 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpmfm\" (UniqueName: \"kubernetes.io/projected/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-kube-api-access-mpmfm\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.984524 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-utilities\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:06 crc kubenswrapper[3552]: I0320 16:13:06.984579 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-catalog-content\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.086434 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-utilities\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.086511 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-catalog-content\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.086578 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-mpmfm\" (UniqueName: \"kubernetes.io/projected/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-kube-api-access-mpmfm\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.087019 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-catalog-content\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.087089 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-utilities\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.107140 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mpmfm\" (UniqueName: \"kubernetes.io/projected/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-kube-api-access-mpmfm\") pod \"redhat-operators-dx6qm\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.215712 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.697480 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dx6qm"] Mar 20 16:13:07 crc kubenswrapper[3552]: W0320 16:13:07.702050 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4f2514c_b7aa_4e01_ac94_9dc8d1725852.slice/crio-d52baffe90c3052df10f52c89012f80ba73fb8e23e3c4c9c5a97ffceb6c5aa58 WatchSource:0}: Error finding container d52baffe90c3052df10f52c89012f80ba73fb8e23e3c4c9c5a97ffceb6c5aa58: Status 404 returned error can't find the container with id d52baffe90c3052df10f52c89012f80ba73fb8e23e3c4c9c5a97ffceb6c5aa58 Mar 20 16:13:07 crc kubenswrapper[3552]: I0320 16:13:07.840468 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerStarted","Data":"d52baffe90c3052df10f52c89012f80ba73fb8e23e3c4c9c5a97ffceb6c5aa58"} Mar 20 16:13:08 crc kubenswrapper[3552]: I0320 16:13:08.856072 3552 generic.go:334] "Generic (PLEG): container finished" podID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerID="cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462" exitCode=0 Mar 20 16:13:08 crc kubenswrapper[3552]: I0320 16:13:08.856363 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerDied","Data":"cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462"} Mar 20 16:13:09 crc kubenswrapper[3552]: I0320 16:13:09.868995 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerStarted","Data":"01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9"} Mar 20 16:13:12 crc kubenswrapper[3552]: I0320 16:13:12.778894 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:13:12 crc kubenswrapper[3552]: I0320 16:13:12.779286 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:13:40 crc kubenswrapper[3552]: I0320 16:13:40.156162 3552 generic.go:334] "Generic (PLEG): container finished" podID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerID="01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9" exitCode=0 Mar 20 16:13:40 crc kubenswrapper[3552]: I0320 16:13:40.156263 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" 
event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerDied","Data":"01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9"} Mar 20 16:13:41 crc kubenswrapper[3552]: I0320 16:13:41.167488 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerStarted","Data":"b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea"} Mar 20 16:13:41 crc kubenswrapper[3552]: I0320 16:13:41.189979 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dx6qm" podStartSLOduration=3.607163076 podStartE2EDuration="35.189918918s" podCreationTimestamp="2026-03-20 16:13:06 +0000 UTC" firstStartedPulling="2026-03-20 16:13:08.858620639 +0000 UTC m=+2888.552317459" lastFinishedPulling="2026-03-20 16:13:40.441376481 +0000 UTC m=+2920.135073301" observedRunningTime="2026-03-20 16:13:41.186940639 +0000 UTC m=+2920.880637499" watchObservedRunningTime="2026-03-20 16:13:41.189918918 +0000 UTC m=+2920.883615748" Mar 20 16:13:42 crc kubenswrapper[3552]: I0320 16:13:42.778876 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:13:42 crc kubenswrapper[3552]: I0320 16:13:42.779222 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:13:42 crc kubenswrapper[3552]: I0320 16:13:42.779264 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 16:13:42 crc kubenswrapper[3552]: I0320 16:13:42.780362 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e680d2bfd56125cc2701591706ccb54da162fa49508def9218ede80dc902e95b"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 16:13:42 crc kubenswrapper[3552]: I0320 16:13:42.780568 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://e680d2bfd56125cc2701591706ccb54da162fa49508def9218ede80dc902e95b" gracePeriod=600 Mar 20 16:13:45 crc kubenswrapper[3552]: I0320 16:13:45.203687 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="e680d2bfd56125cc2701591706ccb54da162fa49508def9218ede80dc902e95b" exitCode=0 Mar 20 16:13:45 crc kubenswrapper[3552]: I0320 16:13:45.203764 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"e680d2bfd56125cc2701591706ccb54da162fa49508def9218ede80dc902e95b"} Mar 20 16:13:45 crc kubenswrapper[3552]: I0320 16:13:45.204291 3552 
scope.go:117] "RemoveContainer" containerID="c3c98dfa5f3a29d1668a0d6410bf7611c4f0cbbf2e1ffc42685cd31d21205617" Mar 20 16:13:46 crc kubenswrapper[3552]: I0320 16:13:46.217681 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5"} Mar 20 16:13:47 crc kubenswrapper[3552]: I0320 16:13:47.216198 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:47 crc kubenswrapper[3552]: I0320 16:13:47.216652 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:13:48 crc kubenswrapper[3552]: I0320 16:13:48.304893 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dx6qm" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="registry-server" probeResult="failure" output=< Mar 20 16:13:48 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:13:48 crc kubenswrapper[3552]: > Mar 20 16:13:58 crc kubenswrapper[3552]: I0320 16:13:58.299321 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dx6qm" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="registry-server" probeResult="failure" output=< Mar 20 16:13:58 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:13:58 crc kubenswrapper[3552]: > Mar 20 16:14:01 crc kubenswrapper[3552]: I0320 16:14:01.414141 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:14:01 crc kubenswrapper[3552]: I0320 16:14:01.414504 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:14:01 crc kubenswrapper[3552]: I0320 16:14:01.414530 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:14:01 crc kubenswrapper[3552]: I0320 16:14:01.414554 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:14:01 crc kubenswrapper[3552]: I0320 16:14:01.414598 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:14:07 crc kubenswrapper[3552]: I0320 16:14:07.325279 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:14:07 crc kubenswrapper[3552]: I0320 16:14:07.400579 3552 generic.go:334] "Generic (PLEG): container finished" podID="6cfc2f22-9946-4e63-9255-8e174a5bcb2f" containerID="d942658359b01deea45779e2a3ab55dbc1f24690a6bcb31dcb70823ccbfe79d5" exitCode=0 Mar 20 16:14:07 crc kubenswrapper[3552]: I0320 16:14:07.400635 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" event={"ID":"6cfc2f22-9946-4e63-9255-8e174a5bcb2f","Type":"ContainerDied","Data":"d942658359b01deea45779e2a3ab55dbc1f24690a6bcb31dcb70823ccbfe79d5"} Mar 20 16:14:07 crc kubenswrapper[3552]: I0320 16:14:07.451313 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 
16:14:07 crc kubenswrapper[3552]: I0320 16:14:07.492467 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dx6qm"] Mar 20 16:14:08 crc kubenswrapper[3552]: I0320 16:14:08.411550 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dx6qm" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="registry-server" containerID="cri-o://b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea" gracePeriod=2 Mar 20 16:14:08 crc kubenswrapper[3552]: I0320 16:14:08.932074 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.041272 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.107262 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-utilities\") pod \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.107541 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpmfm\" (UniqueName: \"kubernetes.io/projected/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-kube-api-access-mpmfm\") pod \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.107591 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-catalog-content\") pod \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\" (UID: \"b4f2514c-b7aa-4e01-ac94-9dc8d1725852\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.108078 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-utilities" (OuterVolumeSpecName: "utilities") pod "b4f2514c-b7aa-4e01-ac94-9dc8d1725852" (UID: "b4f2514c-b7aa-4e01-ac94-9dc8d1725852"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.109674 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-0\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.109741 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5kms\" (UniqueName: \"kubernetes.io/projected/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-kube-api-access-k5kms\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.110030 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.113917 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-kube-api-access-mpmfm" (OuterVolumeSpecName: "kube-api-access-mpmfm") pod "b4f2514c-b7aa-4e01-ac94-9dc8d1725852" (UID: "b4f2514c-b7aa-4e01-ac94-9dc8d1725852"). InnerVolumeSpecName "kube-api-access-mpmfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.114368 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-kube-api-access-k5kms" (OuterVolumeSpecName: "kube-api-access-k5kms") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "kube-api-access-k5kms". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.137222 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "ceilometer-compute-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.211481 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-inventory\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.211774 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-1\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.211855 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-2\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.212101 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-telemetry-combined-ca-bundle\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.212121 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ssh-key-openstack-edpm-ipam\") pod \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\" (UID: \"6cfc2f22-9946-4e63-9255-8e174a5bcb2f\") " Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.212900 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-k5kms\" (UniqueName: \"kubernetes.io/projected/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-kube-api-access-k5kms\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.213031 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mpmfm\" (UniqueName: \"kubernetes.io/projected/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-kube-api-access-mpmfm\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.213199 3552 reconciler_common.go:300] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.220744 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.237493 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.238871 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.244042 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.246871 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-inventory" (OuterVolumeSpecName: "inventory") pod "6cfc2f22-9946-4e63-9255-8e174a5bcb2f" (UID: "6cfc2f22-9946-4e63-9255-8e174a5bcb2f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.318262 3552 reconciler_common.go:300] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.318298 3552 reconciler_common.go:300] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.318313 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.318331 3552 reconciler_common.go:300] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-inventory\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.318344 3552 reconciler_common.go:300] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/6cfc2f22-9946-4e63-9255-8e174a5bcb2f-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.426778 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" event={"ID":"6cfc2f22-9946-4e63-9255-8e174a5bcb2f","Type":"ContainerDied","Data":"ac384f52f97368220e42a25415c53fac6ad2f05a364fae2038ba84309d6759b4"} Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.426829 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac384f52f97368220e42a25415c53fac6ad2f05a364fae2038ba84309d6759b4" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.426942 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.437834 3552 generic.go:334] "Generic (PLEG): container finished" podID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerID="b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea" exitCode=0 Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.437921 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dx6qm" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.459944 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerDied","Data":"b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea"} Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.459991 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx6qm" event={"ID":"b4f2514c-b7aa-4e01-ac94-9dc8d1725852","Type":"ContainerDied","Data":"d52baffe90c3052df10f52c89012f80ba73fb8e23e3c4c9c5a97ffceb6c5aa58"} Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.460019 3552 scope.go:117] "RemoveContainer" containerID="b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.561260 3552 scope.go:117] "RemoveContainer" containerID="01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.627921 3552 scope.go:117] "RemoveContainer" containerID="cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.679658 3552 scope.go:117] "RemoveContainer" containerID="b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea" Mar 20 16:14:09 crc kubenswrapper[3552]: E0320 16:14:09.680259 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea\": container with ID starting with b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea not found: ID does not exist" containerID="b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.680330 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea"} err="failed to get container status \"b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea\": rpc error: code = NotFound desc = could not find container \"b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea\": container with ID starting with b464ab6d689c3d63bb435bc11218a70d984281d7aca395a3fd90f56dfc2526ea not found: ID does not exist" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.680351 3552 scope.go:117] "RemoveContainer" containerID="01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9" Mar 20 16:14:09 crc kubenswrapper[3552]: E0320 16:14:09.680762 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9\": container with ID starting with 01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9 not found: ID does not exist" containerID="01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.680794 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9"} err="failed to get container status \"01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9\": rpc error: code = NotFound desc = could not find container 
\"01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9\": container with ID starting with 01047f50004a6b26020ba1b9176daf9fa2a51f8638662d6bf3712d1ba02b47a9 not found: ID does not exist" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.680806 3552 scope.go:117] "RemoveContainer" containerID="cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462" Mar 20 16:14:09 crc kubenswrapper[3552]: E0320 16:14:09.681079 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462\": container with ID starting with cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462 not found: ID does not exist" containerID="cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462" Mar 20 16:14:09 crc kubenswrapper[3552]: I0320 16:14:09.681107 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462"} err="failed to get container status \"cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462\": rpc error: code = NotFound desc = could not find container \"cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462\": container with ID starting with cb50ec3c1a184df2ac505b6b036fb40b451cb9759b0247fa1d258d2d0d0ad462 not found: ID does not exist" Mar 20 16:14:10 crc kubenswrapper[3552]: I0320 16:14:10.166519 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b4f2514c-b7aa-4e01-ac94-9dc8d1725852" (UID: "b4f2514c-b7aa-4e01-ac94-9dc8d1725852"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:14:10 crc kubenswrapper[3552]: I0320 16:14:10.245206 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4f2514c-b7aa-4e01-ac94-9dc8d1725852-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:14:10 crc kubenswrapper[3552]: I0320 16:14:10.479098 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dx6qm"] Mar 20 16:14:10 crc kubenswrapper[3552]: I0320 16:14:10.487579 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dx6qm"] Mar 20 16:14:11 crc kubenswrapper[3552]: I0320 16:14:11.442458 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" path="/var/lib/kubelet/pods/b4f2514c-b7aa-4e01-ac94-9dc8d1725852/volumes" Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.200663 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.201459 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="prometheus" containerID="cri-o://9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6" gracePeriod=600 Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.201555 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="thanos-sidecar" containerID="cri-o://e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3" gracePeriod=600 Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.201705 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="config-reloader" containerID="cri-o://feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3" gracePeriod=600 Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.904759 3552 generic.go:334] "Generic (PLEG): container finished" podID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerID="e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3" exitCode=0 Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.904786 3552 generic.go:334] "Generic (PLEG): container finished" podID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerID="9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6" exitCode=0 Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.904821 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerDied","Data":"e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3"} Mar 20 16:14:59 crc kubenswrapper[3552]: I0320 16:14:59.904839 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerDied","Data":"9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6"} Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.175367 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn"] Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.175941 3552 
topology_manager.go:215] "Topology Admit Handler" podUID="8c7177de-40aa-4417-ac06-af48316da848" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: E0320 16:15:00.176289 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="extract-utilities" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.176305 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="extract-utilities" Mar 20 16:15:00 crc kubenswrapper[3552]: E0320 16:15:00.176345 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="extract-content" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.176355 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="extract-content" Mar 20 16:15:00 crc kubenswrapper[3552]: E0320 16:15:00.176390 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="registry-server" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.176417 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="registry-server" Mar 20 16:15:00 crc kubenswrapper[3552]: E0320 16:15:00.176441 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="6cfc2f22-9946-4e63-9255-8e174a5bcb2f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.176450 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cfc2f22-9946-4e63-9255-8e174a5bcb2f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.176653 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4f2514c-b7aa-4e01-ac94-9dc8d1725852" containerName="registry-server" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.176665 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cfc2f22-9946-4e63-9255-8e174a5bcb2f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.177478 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.179452 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.182070 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.185260 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn"] Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.377449 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c7177de-40aa-4417-ac06-af48316da848-secret-volume\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.377533 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c7177de-40aa-4417-ac06-af48316da848-config-volume\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.377606 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg5cf\" (UniqueName: \"kubernetes.io/projected/8c7177de-40aa-4417-ac06-af48316da848-kube-api-access-hg5cf\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.480024 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c7177de-40aa-4417-ac06-af48316da848-secret-volume\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.480083 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c7177de-40aa-4417-ac06-af48316da848-config-volume\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.480142 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-hg5cf\" (UniqueName: \"kubernetes.io/projected/8c7177de-40aa-4417-ac06-af48316da848-kube-api-access-hg5cf\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.481499 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c7177de-40aa-4417-ac06-af48316da848-config-volume\") pod 
\"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.486009 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c7177de-40aa-4417-ac06-af48316da848-secret-volume\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.496793 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg5cf\" (UniqueName: \"kubernetes.io/projected/8c7177de-40aa-4417-ac06-af48316da848-kube-api-access-hg5cf\") pod \"collect-profiles-29567055-jctdn\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.500903 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.909924 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.919641 3552 generic.go:334] "Generic (PLEG): container finished" podID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerID="feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3" exitCode=0 Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.919682 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerDied","Data":"feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3"} Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.919702 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae","Type":"ContainerDied","Data":"5c8abc493589e32ee8164e4806cd57ccbef0238ee226a3358882098934fdbcc4"} Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.919722 3552 scope.go:117] "RemoveContainer" containerID="e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3" Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.919907 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.955073 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn"]
Mar 20 16:15:00 crc kubenswrapper[3552]: I0320 16:15:00.978288 3552 scope.go:117] "RemoveContainer" containerID="feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.090519 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-0\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.092149 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095224 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095274 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-secret-combined-ca-bundle\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095370 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpxvj\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-kube-api-access-bpxvj\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095444 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095475 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config-out\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095503 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-1\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095548 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095569 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095607 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-2\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095633 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-thanos-prometheus-http-client-file\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095687 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.095733 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-tls-assets\") pod \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\" (UID: \"96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae\") "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.096138 3552 reconciler_common.go:300] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.099341 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.099950 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.107777 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-kube-api-access-bpxvj" (OuterVolumeSpecName: "kube-api-access-bpxvj") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "kube-api-access-bpxvj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.107815 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.108973 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.110769 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config" (OuterVolumeSpecName: "config") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.110760 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.112227 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.112692 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config-out" (OuterVolumeSpecName: "config-out") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.120453 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.160852 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d". PluginName "kubernetes.io/csi", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.192223 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config" (OuterVolumeSpecName: "web-config") pod "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" (UID: "96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198096 3552 reconciler_common.go:300] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198143 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-bpxvj\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-kube-api-access-bpxvj\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198164 3552 reconciler_common.go:300] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198179 3552 reconciler_common.go:300] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config-out\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198194 3552 reconciler_common.go:300] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198207 3552 reconciler_common.go:300] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198219 3552 reconciler_common.go:300] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-config\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198234 3552 reconciler_common.go:300] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198249 3552 reconciler_common.go:300] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198265 3552 reconciler_common.go:300] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.198279 3552 reconciler_common.go:300] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae-tls-assets\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.199107 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") on node \"crc\" "
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.268428 3552 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.271295 3552 operation_generator.go:1001] UnmountDevice succeeded for volume "pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d") on node "crc"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.285990 3552 scope.go:117] "RemoveContainer" containerID="9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.306771 3552 reconciler_common.go:300] "Volume detached for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.332775 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.336791 3552 scope.go:117] "RemoveContainer" containerID="70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.354360 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.386047 3552 scope.go:117] "RemoveContainer" containerID="e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.387293 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3\": container with ID starting with e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3 not found: ID does not exist" containerID="e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.387353 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3"} err="failed to get container status \"e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3\": rpc error: code = NotFound desc = could not find container \"e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3\": container with ID starting with e001dac14d8e643e42f1d2ef7495d1900c471a94fd12cede10c5b8cea51f6ed3 not found: ID does not exist"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.387363 3552 scope.go:117] "RemoveContainer" containerID="feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.392551 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3\": container with ID starting with feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3 not found: ID does not exist" containerID="feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.392613 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3"} err="failed to get container status \"feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3\": rpc error: code = NotFound desc = could not find container \"feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3\": container with ID starting with feb740047fa2d6ac3530ba896bd2c5d60fabe209b405c83b91a7cba6defb25f3 not found: ID does not exist"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.392629 3552 scope.go:117] "RemoveContainer" containerID="9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.393355 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6\": container with ID starting with 9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6 not found: ID does not exist" containerID="9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.393458 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6"} err="failed to get container status \"9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6\": rpc error: code = NotFound desc = could not find container \"9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6\": container with ID starting with 9ff41141e3f717bbce36e67a3df25a1678dd5a26b28b5db705a308f88604f6d6 not found: ID does not exist"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.393482 3552 scope.go:117] "RemoveContainer" containerID="70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.394076 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f\": container with ID starting with 70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f not found: ID does not exist" containerID="70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.394174 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f"} err="failed to get container status \"70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f\": rpc error: code = NotFound desc = could not find container \"70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f\": container with ID starting with 70d87c4b9ab0b18979932cbfdc533bc88d68b214d91eb76a79e73067d1a0175f not found: ID does not exist"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.404421 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.404602 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5aa16a4d-0492-4319-ae5d-87e5a20bec39" podNamespace="openstack" podName="prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.404852 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="thanos-sidecar"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.404863 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="thanos-sidecar"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.404886 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="init-config-reloader"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.404892 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="init-config-reloader"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.404904 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="prometheus"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.404910 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="prometheus"
Mar 20 16:15:01 crc kubenswrapper[3552]: E0320 16:15:01.404932 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="config-reloader"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.404939 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="config-reloader"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.405120 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="thanos-sidecar"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.405137 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="prometheus"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.405147 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" containerName="config-reloader"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.416311 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.416379 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.416426 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.416451 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.416507 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.420478 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.425604 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.425884 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.426021 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.426299 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-s2p59"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.426475 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.426501 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.426522 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.451715 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.452660 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae" path="/var/lib/kubelet/pods/96d21e55-2bd4-4fbe-b8fb-4f33165bd3ae/volumes"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.455185 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614180 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614266 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614574 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614618 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5aa16a4d-0492-4319-ae5d-87e5a20bec39-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614658 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614684 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614748 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614777 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614848 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5aa16a4d-0492-4319-ae5d-87e5a20bec39-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614901 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614946 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-config\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.614992 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.615116 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df4br\" (UniqueName: \"kubernetes.io/projected/5aa16a4d-0492-4319-ae5d-87e5a20bec39-kube-api-access-df4br\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717244 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-df4br\" (UniqueName: \"kubernetes.io/projected/5aa16a4d-0492-4319-ae5d-87e5a20bec39-kube-api-access-df4br\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717326 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717363 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717469 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717525 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5aa16a4d-0492-4319-ae5d-87e5a20bec39-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717554 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717574 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717605 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717626 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717657 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5aa16a4d-0492-4319-ae5d-87e5a20bec39-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717688 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717713 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-config\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.717737 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.719236 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.719315 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.719841 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5aa16a4d-0492-4319-ae5d-87e5a20bec39-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.722580 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.723016 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-config\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.723040 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.723190 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.724071 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5aa16a4d-0492-4319-ae5d-87e5a20bec39-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.724345 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5aa16a4d-0492-4319-ae5d-87e5a20bec39-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.724956 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.725022 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5aa16a4d-0492-4319-ae5d-87e5a20bec39-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.725675 3552 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.725711 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c127805bcb575cbe31260fab01e798009882e8c5a15f13517ae73f993a199ea5/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.737329 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-df4br\" (UniqueName: \"kubernetes.io/projected/5aa16a4d-0492-4319-ae5d-87e5a20bec39-kube-api-access-df4br\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.794645 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-709f33c1-d04d-4cf0-8fea-754a33bbdd5d\") pod \"prometheus-metric-storage-0\" (UID: \"5aa16a4d-0492-4319-ae5d-87e5a20bec39\") " pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.927174 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" event={"ID":"8c7177de-40aa-4417-ac06-af48316da848","Type":"ContainerStarted","Data":"30f91d02cb0b702fb58105d8610783ff15e614c332a22b1b4c9a27aeab34f236"}
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.927223 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" event={"ID":"8c7177de-40aa-4417-ac06-af48316da848","Type":"ContainerStarted","Data":"9dc84c621927d4ff50dd9c876a71a2a69c0526de0b4f3e2a242bda077e38f03c"}
Mar 20 16:15:01 crc kubenswrapper[3552]: I0320 16:15:01.947863 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" podStartSLOduration=1.947806501 podStartE2EDuration="1.947806501s" podCreationTimestamp="2026-03-20 16:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 16:15:01.941728619 +0000 UTC m=+3001.635425449" watchObservedRunningTime="2026-03-20 16:15:01.947806501 +0000 UTC m=+3001.641503331"
Mar 20 16:15:02 crc kubenswrapper[3552]: I0320 16:15:02.047469 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:02 crc kubenswrapper[3552]: I0320 16:15:02.536270 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Mar 20 16:15:02 crc kubenswrapper[3552]: W0320 16:15:02.542055 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5aa16a4d_0492_4319_ae5d_87e5a20bec39.slice/crio-b9143335c1a549e30751cd8b6ebb9e496e730788bd34f20a1fc21569464f5972 WatchSource:0}: Error finding container b9143335c1a549e30751cd8b6ebb9e496e730788bd34f20a1fc21569464f5972: Status 404 returned error can't find the container with id b9143335c1a549e30751cd8b6ebb9e496e730788bd34f20a1fc21569464f5972
Mar 20 16:15:02 crc kubenswrapper[3552]: I0320 16:15:02.938277 3552 generic.go:334] "Generic (PLEG): container finished" podID="8c7177de-40aa-4417-ac06-af48316da848" containerID="30f91d02cb0b702fb58105d8610783ff15e614c332a22b1b4c9a27aeab34f236" exitCode=0
Mar 20 16:15:02 crc kubenswrapper[3552]: I0320 16:15:02.938355 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" event={"ID":"8c7177de-40aa-4417-ac06-af48316da848","Type":"ContainerDied","Data":"30f91d02cb0b702fb58105d8610783ff15e614c332a22b1b4c9a27aeab34f236"}
Mar 20 16:15:02 crc kubenswrapper[3552]: I0320 16:15:02.941875 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5aa16a4d-0492-4319-ae5d-87e5a20bec39","Type":"ContainerStarted","Data":"b9143335c1a549e30751cd8b6ebb9e496e730788bd34f20a1fc21569464f5972"}
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.262151 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn"
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.384959 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c7177de-40aa-4417-ac06-af48316da848-secret-volume\") pod \"8c7177de-40aa-4417-ac06-af48316da848\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") "
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.385253 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c7177de-40aa-4417-ac06-af48316da848-config-volume\") pod \"8c7177de-40aa-4417-ac06-af48316da848\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") "
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.385311 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hg5cf\" (UniqueName: \"kubernetes.io/projected/8c7177de-40aa-4417-ac06-af48316da848-kube-api-access-hg5cf\") pod \"8c7177de-40aa-4417-ac06-af48316da848\" (UID: \"8c7177de-40aa-4417-ac06-af48316da848\") "
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.386488 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c7177de-40aa-4417-ac06-af48316da848-config-volume" (OuterVolumeSpecName: "config-volume") pod "8c7177de-40aa-4417-ac06-af48316da848" (UID: "8c7177de-40aa-4417-ac06-af48316da848"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.393327 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c7177de-40aa-4417-ac06-af48316da848-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8c7177de-40aa-4417-ac06-af48316da848" (UID: "8c7177de-40aa-4417-ac06-af48316da848"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.394436 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c7177de-40aa-4417-ac06-af48316da848-kube-api-access-hg5cf" (OuterVolumeSpecName: "kube-api-access-hg5cf") pod "8c7177de-40aa-4417-ac06-af48316da848" (UID: "8c7177de-40aa-4417-ac06-af48316da848"). InnerVolumeSpecName "kube-api-access-hg5cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.489236 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c7177de-40aa-4417-ac06-af48316da848-config-volume\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.489285 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-hg5cf\" (UniqueName: \"kubernetes.io/projected/8c7177de-40aa-4417-ac06-af48316da848-kube-api-access-hg5cf\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.489298 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c7177de-40aa-4417-ac06-af48316da848-secret-volume\") on node \"crc\" DevicePath \"\""
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.962498 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn" event={"ID":"8c7177de-40aa-4417-ac06-af48316da848","Type":"ContainerDied","Data":"9dc84c621927d4ff50dd9c876a71a2a69c0526de0b4f3e2a242bda077e38f03c"}
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.962797 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9dc84c621927d4ff50dd9c876a71a2a69c0526de0b4f3e2a242bda077e38f03c"
Mar 20 16:15:04 crc kubenswrapper[3552]: I0320 16:15:04.962913 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567055-jctdn"
Mar 20 16:15:05 crc kubenswrapper[3552]: I0320 16:15:05.041523 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55"]
Mar 20 16:15:05 crc kubenswrapper[3552]: I0320 16:15:05.049326 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567010-zqp55"]
Mar 20 16:15:05 crc kubenswrapper[3552]: I0320 16:15:05.448479 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ecabf18-01fa-4357-8042-dc7fcb56e5ee" path="/var/lib/kubelet/pods/6ecabf18-01fa-4357-8042-dc7fcb56e5ee/volumes"
Mar 20 16:15:06 crc kubenswrapper[3552]: I0320 16:15:06.981080 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5aa16a4d-0492-4319-ae5d-87e5a20bec39","Type":"ContainerStarted","Data":"68339f39987fc48231d1570e6f2efa3f60fa790f2f6b1eeb0158e20142f22b52"}
Mar 20 16:15:17 crc kubenswrapper[3552]: I0320 16:15:17.067064 3552 generic.go:334] "Generic (PLEG): container finished" podID="5aa16a4d-0492-4319-ae5d-87e5a20bec39" containerID="68339f39987fc48231d1570e6f2efa3f60fa790f2f6b1eeb0158e20142f22b52" exitCode=0
Mar 20 16:15:17 crc kubenswrapper[3552]: I0320 16:15:17.067613 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5aa16a4d-0492-4319-ae5d-87e5a20bec39","Type":"ContainerDied","Data":"68339f39987fc48231d1570e6f2efa3f60fa790f2f6b1eeb0158e20142f22b52"}
Mar 20 16:15:18 crc kubenswrapper[3552]: I0320 16:15:18.076129 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5aa16a4d-0492-4319-ae5d-87e5a20bec39","Type":"ContainerStarted","Data":"06bfca30b0df5decf12b6fd00b8638b3283adc235170a4dd0dcc672eaa6fd92e"}
Mar 20 16:15:23 crc kubenswrapper[3552]: I0320 16:15:23.125254 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5aa16a4d-0492-4319-ae5d-87e5a20bec39","Type":"ContainerStarted","Data":"30df6a14e2229806b3ba7e2a0c93b9288d0db23b90c9112326eff889eca446cd"}
Mar 20 16:15:23 crc kubenswrapper[3552]: I0320 16:15:23.126327 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5aa16a4d-0492-4319-ae5d-87e5a20bec39","Type":"ContainerStarted","Data":"d17e563a1d15768b6d919a94974976fa51afbb0afa6e6246c7eb6f2af8475781"}
Mar 20 16:15:23 crc kubenswrapper[3552]: I0320 16:15:23.174153 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=22.174109141 podStartE2EDuration="22.174109141s" podCreationTimestamp="2026-03-20 16:15:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 16:15:23.167290229 +0000 UTC m=+3022.860987079" watchObservedRunningTime="2026-03-20 16:15:23.174109141 +0000 UTC m=+3022.867805971"
Mar 20 16:15:26 crc kubenswrapper[3552]: I0320 16:15:26.943568 3552 scope.go:117] "RemoveContainer" containerID="10a41eed047daf8ffb05cea26fd3200eecb45c1c5fc574acdb0ba75711e07896"
Mar 20 16:15:27 crc kubenswrapper[3552]: I0320 16:15:27.048500 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:32 crc kubenswrapper[3552]: I0320 16:15:32.048675 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:32 crc kubenswrapper[3552]: I0320 16:15:32.053925 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:32 crc kubenswrapper[3552]: I0320 16:15:32.198779 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.582532 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.583274 3552 topology_manager.go:215] "Topology Admit Handler" podUID="bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" podNamespace="openstack" podName="tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: E0320 16:15:53.583636 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="8c7177de-40aa-4417-ac06-af48316da848" containerName="collect-profiles"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.583652 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c7177de-40aa-4417-ac06-af48316da848" containerName="collect-profiles"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.583837 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c7177de-40aa-4417-ac06-af48316da848" containerName="collect-profiles"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.584480 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.587528 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.587705 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.587863 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.588008 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5lzdb"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.597655 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689054 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689167 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gv84n\" (UniqueName: \"kubernetes.io/projected/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-kube-api-access-gv84n\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689238 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689275 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689351 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689586 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689615 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-config-data\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689645 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.689682 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.790898 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.790954 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.790984 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-config-data\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791021 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791069 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791119 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791169 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gv84n\" (UniqueName: \"kubernetes.io/projected/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-kube-api-access-gv84n\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791258 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791300 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791728 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791741 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.791793 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.793061 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.795355 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-config-data\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.798497 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.798813 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.811457 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.813938 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-gv84n\" (UniqueName: \"kubernetes.io/projected/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-kube-api-access-gv84n\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.825640 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " pod="openstack/tempest-tests-tempest"
Mar 20 16:15:53 crc kubenswrapper[3552]: I0320 16:15:53.903481 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Mar 20 16:15:54 crc kubenswrapper[3552]: I0320 16:15:54.389541 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Mar 20 16:15:54 crc kubenswrapper[3552]: W0320 16:15:54.405129 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbea2de7e_3b1c_4ab2_a39f_5f9980e81f92.slice/crio-7a1b62a09c083f677cd53af0816a8cd66735b17332378a260fc639e19bda1657 WatchSource:0}: Error finding container 7a1b62a09c083f677cd53af0816a8cd66735b17332378a260fc639e19bda1657: Status 404 returned error can't find the container with id 7a1b62a09c083f677cd53af0816a8cd66735b17332378a260fc639e19bda1657
Mar 20 16:15:54 crc kubenswrapper[3552]: I0320 16:15:54.407871 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Mar 20 16:15:55 crc kubenswrapper[3552]: I0320 16:15:55.394544 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92","Type":"ContainerStarted","Data":"7a1b62a09c083f677cd53af0816a8cd66735b17332378a260fc639e19bda1657"}
Mar 20 16:16:01 crc kubenswrapper[3552]: I0320 16:16:01.439186 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:16:01 crc kubenswrapper[3552]: I0320 16:16:01.445090 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:16:01 crc kubenswrapper[3552]: I0320 16:16:01.445196 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:16:01 crc kubenswrapper[3552]: I0320 16:16:01.445225 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:16:01 crc kubenswrapper[3552]: I0320 16:16:01.445277 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:16:07 crc kubenswrapper[3552]: I0320 16:16:07.079822 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Mar 20 16:16:08 crc kubenswrapper[3552]: I0320 16:16:08.507940 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92","Type":"ContainerStarted","Data":"36b2065160badeb7a2c0565766e1067f5beacf8fbb7931cb5ebf6fee0bfae393"}
Mar 20 16:16:08 crc kubenswrapper[3552]: I0320 16:16:08.535354 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.8654014979999998 podStartE2EDuration="16.535306408s" podCreationTimestamp="2026-03-20 16:15:52 +0000 UTC" firstStartedPulling="2026-03-20 16:15:54.407619392 +0000 UTC m=+3054.101316222" lastFinishedPulling="2026-03-20 16:16:07.077524302 +0000 UTC m=+3066.771221132" observedRunningTime="2026-03-20 16:16:08.52448633 +0000 UTC m=+3068.218183170" watchObservedRunningTime="2026-03-20 16:16:08.535306408 +0000 UTC m=+3068.229003238"
Mar 20 16:16:12 crc kubenswrapper[3552]: I0320 16:16:12.778280 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:16:12 crc kubenswrapper[3552]: I0320 16:16:12.779638 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:16:42 crc kubenswrapper[3552]: I0320 16:16:42.778994 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:16:42 crc kubenswrapper[3552]: I0320 16:16:42.779649 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:17:01 crc kubenswrapper[3552]: I0320 16:17:01.445727 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:17:01 crc kubenswrapper[3552]: I0320 16:17:01.446372 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:17:01 crc kubenswrapper[3552]: I0320 16:17:01.446435 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:17:01 crc kubenswrapper[3552]: I0320 16:17:01.446470 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:17:01 crc kubenswrapper[3552]: I0320 16:17:01.446542 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:17:12 crc kubenswrapper[3552]: I0320 16:17:12.778353 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:17:12 crc kubenswrapper[3552]: I0320 16:17:12.778883 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:17:12 crc kubenswrapper[3552]: I0320 16:17:12.778920 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 16:17:12 crc kubenswrapper[3552]: I0320 16:17:12.779943 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" 
Mar 20 16:17:12 crc kubenswrapper[3552]: I0320 16:17:12.780111 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" gracePeriod=600 Mar 20 16:17:12 crc kubenswrapper[3552]: E0320 16:17:12.898480 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:17:13 crc kubenswrapper[3552]: I0320 16:17:13.030484 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" exitCode=0 Mar 20 16:17:13 crc kubenswrapper[3552]: I0320 16:17:13.030524 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5"} Mar 20 16:17:13 crc kubenswrapper[3552]: I0320 16:17:13.030550 3552 scope.go:117] "RemoveContainer" containerID="e680d2bfd56125cc2701591706ccb54da162fa49508def9218ede80dc902e95b" Mar 20 16:17:13 crc kubenswrapper[3552]: I0320 16:17:13.031247 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:17:13 crc kubenswrapper[3552]: E0320 16:17:13.031767 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:17:28 crc kubenswrapper[3552]: I0320 16:17:28.429909 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:17:28 crc kubenswrapper[3552]: E0320 16:17:28.431882 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:17:39 crc kubenswrapper[3552]: I0320 16:17:39.430472 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:17:39 crc kubenswrapper[3552]: E0320 16:17:39.432606 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:17:52 crc kubenswrapper[3552]: I0320 16:17:52.430350 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:17:52 crc kubenswrapper[3552]: E0320 16:17:52.431429 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:18:01 crc kubenswrapper[3552]: I0320 16:18:01.447264 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:18:01 crc kubenswrapper[3552]: I0320 16:18:01.448792 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:18:01 crc kubenswrapper[3552]: I0320 16:18:01.448846 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:18:01 crc kubenswrapper[3552]: I0320 16:18:01.449008 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:18:01 crc kubenswrapper[3552]: I0320 16:18:01.449089 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:18:05 crc kubenswrapper[3552]: I0320 16:18:05.431218 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:18:05 crc kubenswrapper[3552]: E0320 16:18:05.444427 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:18:20 crc kubenswrapper[3552]: I0320 16:18:20.431325 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:18:20 crc kubenswrapper[3552]: E0320 16:18:20.432649 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:18:35 crc kubenswrapper[3552]: I0320 16:18:35.430379 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:18:35 crc kubenswrapper[3552]: E0320 16:18:35.431354 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:18:49 crc kubenswrapper[3552]: I0320 16:18:49.430502 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:18:49 crc kubenswrapper[3552]: E0320 16:18:49.431638 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:19:01 crc kubenswrapper[3552]: I0320 16:19:01.449986 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:19:01 crc kubenswrapper[3552]: I0320 16:19:01.450622 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:19:01 crc kubenswrapper[3552]: I0320 16:19:01.450653 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:19:01 crc kubenswrapper[3552]: I0320 16:19:01.450700 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:19:01 crc kubenswrapper[3552]: I0320 16:19:01.450747 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:19:03 crc kubenswrapper[3552]: I0320 16:19:03.431259 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:19:03 crc kubenswrapper[3552]: E0320 16:19:03.432611 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:19:15 crc kubenswrapper[3552]: I0320 16:19:15.433196 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:19:15 crc kubenswrapper[3552]: E0320 16:19:15.435324 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:19:26 crc kubenswrapper[3552]: I0320 16:19:26.431449 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:19:26 crc kubenswrapper[3552]: E0320 16:19:26.432499 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:19:38 crc kubenswrapper[3552]: I0320 16:19:38.430924 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:19:38 crc kubenswrapper[3552]: E0320 16:19:38.432098 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:19:52 crc kubenswrapper[3552]: I0320 16:19:52.430819 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:19:52 crc kubenswrapper[3552]: E0320 16:19:52.432815 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:20:01 crc kubenswrapper[3552]: I0320 16:20:01.451498 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:20:01 crc kubenswrapper[3552]: I0320 16:20:01.453324 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:20:01 crc kubenswrapper[3552]: I0320 16:20:01.453378 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:20:01 crc kubenswrapper[3552]: I0320 16:20:01.453502 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:20:01 crc kubenswrapper[3552]: I0320 16:20:01.453547 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.665064 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6fpfl"] Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.665550 3552 topology_manager.go:215] "Topology Admit Handler" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" podNamespace="openshift-marketplace" podName="certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.667284 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.682279 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6fpfl"] Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.827653 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-catalog-content\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.828113 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sljkp\" (UniqueName: \"kubernetes.io/projected/bb2210d3-231e-4fc4-af10-7ae42010d52f-kube-api-access-sljkp\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.828171 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-utilities\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.930574 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sljkp\" (UniqueName: \"kubernetes.io/projected/bb2210d3-231e-4fc4-af10-7ae42010d52f-kube-api-access-sljkp\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.930643 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-utilities\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.930739 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-catalog-content\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.931333 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-utilities\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.931335 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-catalog-content\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.949628 3552 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sljkp\" (UniqueName: \"kubernetes.io/projected/bb2210d3-231e-4fc4-af10-7ae42010d52f-kube-api-access-sljkp\") pod \"certified-operators-6fpfl\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:03 crc kubenswrapper[3552]: I0320 16:20:03.986921 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:04 crc kubenswrapper[3552]: I0320 16:20:04.545669 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6fpfl"] Mar 20 16:20:04 crc kubenswrapper[3552]: I0320 16:20:04.864340 3552 generic.go:334] "Generic (PLEG): container finished" podID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerID="cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86" exitCode=0 Mar 20 16:20:04 crc kubenswrapper[3552]: I0320 16:20:04.864413 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerDied","Data":"cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86"} Mar 20 16:20:04 crc kubenswrapper[3552]: I0320 16:20:04.864441 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerStarted","Data":"a48fb865ef0a62822de79b3409c55e155475f3454fc455ae5776d0f9789e9cd0"} Mar 20 16:20:05 crc kubenswrapper[3552]: I0320 16:20:05.874865 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerStarted","Data":"96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f"} Mar 20 16:20:06 crc kubenswrapper[3552]: I0320 16:20:06.430572 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:20:06 crc kubenswrapper[3552]: E0320 16:20:06.431126 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:20:13 crc kubenswrapper[3552]: I0320 16:20:13.934580 3552 generic.go:334] "Generic (PLEG): container finished" podID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerID="96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f" exitCode=0 Mar 20 16:20:13 crc kubenswrapper[3552]: I0320 16:20:13.934810 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerDied","Data":"96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f"} Mar 20 16:20:14 crc kubenswrapper[3552]: I0320 16:20:14.944384 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerStarted","Data":"1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938"} Mar 20 16:20:14 crc kubenswrapper[3552]: I0320 16:20:14.976176 3552 
pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6fpfl" podStartSLOduration=2.632365658 podStartE2EDuration="11.9761101s" podCreationTimestamp="2026-03-20 16:20:03 +0000 UTC" firstStartedPulling="2026-03-20 16:20:04.882468848 +0000 UTC m=+3304.576165668" lastFinishedPulling="2026-03-20 16:20:14.22621328 +0000 UTC m=+3313.919910110" observedRunningTime="2026-03-20 16:20:14.962051905 +0000 UTC m=+3314.655748755" watchObservedRunningTime="2026-03-20 16:20:14.9761101 +0000 UTC m=+3314.669806940" Mar 20 16:20:17 crc kubenswrapper[3552]: I0320 16:20:17.433002 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:20:17 crc kubenswrapper[3552]: E0320 16:20:17.433985 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:20:23 crc kubenswrapper[3552]: I0320 16:20:23.987936 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:23 crc kubenswrapper[3552]: I0320 16:20:23.988565 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:24 crc kubenswrapper[3552]: I0320 16:20:24.070239 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:24 crc kubenswrapper[3552]: I0320 16:20:24.159126 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:24 crc kubenswrapper[3552]: I0320 16:20:24.205942 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6fpfl"] Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.034904 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6fpfl" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="registry-server" containerID="cri-o://1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938" gracePeriod=2 Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.485701 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.494757 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-utilities\") pod \"bb2210d3-231e-4fc4-af10-7ae42010d52f\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.494831 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sljkp\" (UniqueName: \"kubernetes.io/projected/bb2210d3-231e-4fc4-af10-7ae42010d52f-kube-api-access-sljkp\") pod \"bb2210d3-231e-4fc4-af10-7ae42010d52f\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.494883 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-catalog-content\") pod \"bb2210d3-231e-4fc4-af10-7ae42010d52f\" (UID: \"bb2210d3-231e-4fc4-af10-7ae42010d52f\") " Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.496097 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-utilities" (OuterVolumeSpecName: "utilities") pod "bb2210d3-231e-4fc4-af10-7ae42010d52f" (UID: "bb2210d3-231e-4fc4-af10-7ae42010d52f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.504936 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb2210d3-231e-4fc4-af10-7ae42010d52f-kube-api-access-sljkp" (OuterVolumeSpecName: "kube-api-access-sljkp") pod "bb2210d3-231e-4fc4-af10-7ae42010d52f" (UID: "bb2210d3-231e-4fc4-af10-7ae42010d52f"). InnerVolumeSpecName "kube-api-access-sljkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.596078 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.596115 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-sljkp\" (UniqueName: \"kubernetes.io/projected/bb2210d3-231e-4fc4-af10-7ae42010d52f-kube-api-access-sljkp\") on node \"crc\" DevicePath \"\"" Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.763577 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb2210d3-231e-4fc4-af10-7ae42010d52f" (UID: "bb2210d3-231e-4fc4-af10-7ae42010d52f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:20:26 crc kubenswrapper[3552]: I0320 16:20:26.799056 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2210d3-231e-4fc4-af10-7ae42010d52f-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.047353 3552 generic.go:334] "Generic (PLEG): container finished" podID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerID="1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938" exitCode=0 Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.047397 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerDied","Data":"1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938"} Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.047446 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6fpfl" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.047461 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6fpfl" event={"ID":"bb2210d3-231e-4fc4-af10-7ae42010d52f","Type":"ContainerDied","Data":"a48fb865ef0a62822de79b3409c55e155475f3454fc455ae5776d0f9789e9cd0"} Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.047484 3552 scope.go:117] "RemoveContainer" containerID="1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.095290 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6fpfl"] Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.108495 3552 scope.go:117] "RemoveContainer" containerID="96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.114289 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6fpfl"] Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.151368 3552 scope.go:117] "RemoveContainer" containerID="cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.232074 3552 scope.go:117] "RemoveContainer" containerID="1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938" Mar 20 16:20:27 crc kubenswrapper[3552]: E0320 16:20:27.232634 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938\": container with ID starting with 1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938 not found: ID does not exist" containerID="1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.232686 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938"} err="failed to get container status \"1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938\": rpc error: code = NotFound desc = could not find container \"1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938\": container with ID starting with 1e5acba42d0e2547abc2e0a797881bd1ec6c8cec4fa633f2c87b710e58846938 not found: ID does not exist" 
Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.232702 3552 scope.go:117] "RemoveContainer" containerID="96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f" Mar 20 16:20:27 crc kubenswrapper[3552]: E0320 16:20:27.233059 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f\": container with ID starting with 96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f not found: ID does not exist" containerID="96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.233095 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f"} err="failed to get container status \"96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f\": rpc error: code = NotFound desc = could not find container \"96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f\": container with ID starting with 96f255fdc19ba3a4cdb13160197da644da5878bc1e9594e63925681195c8fc2f not found: ID does not exist" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.233106 3552 scope.go:117] "RemoveContainer" containerID="cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86" Mar 20 16:20:27 crc kubenswrapper[3552]: E0320 16:20:27.233390 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86\": container with ID starting with cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86 not found: ID does not exist" containerID="cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.233446 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86"} err="failed to get container status \"cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86\": rpc error: code = NotFound desc = could not find container \"cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86\": container with ID starting with cc77a51d7bce0c1d9adef8d69186094368072e8e633dd72d6a4882f92ad3cc86 not found: ID does not exist" Mar 20 16:20:27 crc kubenswrapper[3552]: I0320 16:20:27.439584 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" path="/var/lib/kubelet/pods/bb2210d3-231e-4fc4-af10-7ae42010d52f/volumes" Mar 20 16:20:28 crc kubenswrapper[3552]: I0320 16:20:28.430312 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:20:28 crc kubenswrapper[3552]: E0320 16:20:28.431122 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.659870 3552 kubelet.go:2429] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-8sfrh"] Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.660548 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" podNamespace="openshift-marketplace" podName="community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: E0320 16:20:37.660849 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="registry-server" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.660861 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="registry-server" Mar 20 16:20:37 crc kubenswrapper[3552]: E0320 16:20:37.660884 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="extract-utilities" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.660891 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="extract-utilities" Mar 20 16:20:37 crc kubenswrapper[3552]: E0320 16:20:37.660911 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="extract-content" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.660917 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="extract-content" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.661099 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb2210d3-231e-4fc4-af10-7ae42010d52f" containerName="registry-server" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.664132 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.672050 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8sfrh"] Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.708142 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-utilities\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.708520 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-catalog-content\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.708603 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sftmr\" (UniqueName: \"kubernetes.io/projected/ad977863-c3ec-4304-a7f5-6fadc5370124-kube-api-access-sftmr\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.810517 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-utilities\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.810593 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-catalog-content\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.810663 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-sftmr\" (UniqueName: \"kubernetes.io/projected/ad977863-c3ec-4304-a7f5-6fadc5370124-kube-api-access-sftmr\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.811161 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-utilities\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.811185 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-catalog-content\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:37 crc kubenswrapper[3552]: I0320 16:20:37.830632 3552 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sftmr\" (UniqueName: \"kubernetes.io/projected/ad977863-c3ec-4304-a7f5-6fadc5370124-kube-api-access-sftmr\") pod \"community-operators-8sfrh\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:38 crc kubenswrapper[3552]: I0320 16:20:38.006680 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:38 crc kubenswrapper[3552]: I0320 16:20:38.322279 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8sfrh"] Mar 20 16:20:39 crc kubenswrapper[3552]: I0320 16:20:39.136755 3552 generic.go:334] "Generic (PLEG): container finished" podID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerID="096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969" exitCode=0 Mar 20 16:20:39 crc kubenswrapper[3552]: I0320 16:20:39.136991 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerDied","Data":"096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969"} Mar 20 16:20:39 crc kubenswrapper[3552]: I0320 16:20:39.137013 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerStarted","Data":"a53b6b6ca8a3f43566c5962d794d8e4d3cb31eca6d502fa92302ff987cbeb95b"} Mar 20 16:20:40 crc kubenswrapper[3552]: I0320 16:20:40.146493 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerStarted","Data":"b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f"} Mar 20 16:20:41 crc kubenswrapper[3552]: I0320 16:20:41.439820 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:20:41 crc kubenswrapper[3552]: E0320 16:20:41.441210 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:20:52 crc kubenswrapper[3552]: I0320 16:20:52.432095 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:20:52 crc kubenswrapper[3552]: E0320 16:20:52.433189 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:20:53 crc kubenswrapper[3552]: I0320 16:20:53.244675 3552 generic.go:334] "Generic (PLEG): container finished" podID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerID="b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f" exitCode=0 Mar 20 16:20:53 crc kubenswrapper[3552]: I0320 
16:20:53.244731 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerDied","Data":"b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f"} Mar 20 16:20:55 crc kubenswrapper[3552]: I0320 16:20:55.259790 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerStarted","Data":"ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c"} Mar 20 16:20:55 crc kubenswrapper[3552]: I0320 16:20:55.286136 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8sfrh" podStartSLOduration=3.8708398649999998 podStartE2EDuration="18.286084625s" podCreationTimestamp="2026-03-20 16:20:37 +0000 UTC" firstStartedPulling="2026-03-20 16:20:39.139193418 +0000 UTC m=+3338.832890248" lastFinishedPulling="2026-03-20 16:20:53.554438178 +0000 UTC m=+3353.248135008" observedRunningTime="2026-03-20 16:20:55.277587339 +0000 UTC m=+3354.971284179" watchObservedRunningTime="2026-03-20 16:20:55.286084625 +0000 UTC m=+3354.979781455" Mar 20 16:20:58 crc kubenswrapper[3552]: I0320 16:20:58.007459 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:58 crc kubenswrapper[3552]: I0320 16:20:58.007792 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:20:59 crc kubenswrapper[3552]: I0320 16:20:59.089877 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-8sfrh" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="registry-server" probeResult="failure" output=< Mar 20 16:20:59 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:20:59 crc kubenswrapper[3552]: > Mar 20 16:21:01 crc kubenswrapper[3552]: I0320 16:21:01.453976 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:21:01 crc kubenswrapper[3552]: I0320 16:21:01.454599 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:21:01 crc kubenswrapper[3552]: I0320 16:21:01.454634 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:21:01 crc kubenswrapper[3552]: I0320 16:21:01.454665 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:21:01 crc kubenswrapper[3552]: I0320 16:21:01.454731 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:21:04 crc kubenswrapper[3552]: I0320 16:21:04.431535 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:21:04 crc kubenswrapper[3552]: E0320 16:21:04.432814 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:21:08 crc kubenswrapper[3552]: I0320 16:21:08.119343 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:21:08 crc kubenswrapper[3552]: I0320 16:21:08.203538 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:21:08 crc kubenswrapper[3552]: I0320 16:21:08.251246 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8sfrh"] Mar 20 16:21:09 crc kubenswrapper[3552]: I0320 16:21:09.354939 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8sfrh" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="registry-server" containerID="cri-o://ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c" gracePeriod=2 Mar 20 16:21:09 crc kubenswrapper[3552]: I0320 16:21:09.972020 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.141563 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-catalog-content\") pod \"ad977863-c3ec-4304-a7f5-6fadc5370124\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.141995 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sftmr\" (UniqueName: \"kubernetes.io/projected/ad977863-c3ec-4304-a7f5-6fadc5370124-kube-api-access-sftmr\") pod \"ad977863-c3ec-4304-a7f5-6fadc5370124\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.142041 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-utilities\") pod \"ad977863-c3ec-4304-a7f5-6fadc5370124\" (UID: \"ad977863-c3ec-4304-a7f5-6fadc5370124\") " Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.142667 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-utilities" (OuterVolumeSpecName: "utilities") pod "ad977863-c3ec-4304-a7f5-6fadc5370124" (UID: "ad977863-c3ec-4304-a7f5-6fadc5370124"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.143119 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.152788 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad977863-c3ec-4304-a7f5-6fadc5370124-kube-api-access-sftmr" (OuterVolumeSpecName: "kube-api-access-sftmr") pod "ad977863-c3ec-4304-a7f5-6fadc5370124" (UID: "ad977863-c3ec-4304-a7f5-6fadc5370124"). InnerVolumeSpecName "kube-api-access-sftmr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.243860 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-sftmr\" (UniqueName: \"kubernetes.io/projected/ad977863-c3ec-4304-a7f5-6fadc5370124-kube-api-access-sftmr\") on node \"crc\" DevicePath \"\"" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.379674 3552 generic.go:334] "Generic (PLEG): container finished" podID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerID="ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c" exitCode=0 Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.379760 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerDied","Data":"ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c"} Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.379815 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8sfrh" event={"ID":"ad977863-c3ec-4304-a7f5-6fadc5370124","Type":"ContainerDied","Data":"a53b6b6ca8a3f43566c5962d794d8e4d3cb31eca6d502fa92302ff987cbeb95b"} Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.379840 3552 scope.go:117] "RemoveContainer" containerID="ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.379984 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8sfrh" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.511478 3552 scope.go:117] "RemoveContainer" containerID="b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.563731 3552 scope.go:117] "RemoveContainer" containerID="096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.615810 3552 scope.go:117] "RemoveContainer" containerID="ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c" Mar 20 16:21:10 crc kubenswrapper[3552]: E0320 16:21:10.623543 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c\": container with ID starting with ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c not found: ID does not exist" containerID="ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.623611 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c"} err="failed to get container status \"ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c\": rpc error: code = NotFound desc = could not find container \"ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c\": container with ID starting with ae39598cf67b5861307444cd465eead47ebdb78b9d9483962b8b887b0cd2508c not found: ID does not exist" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.623630 3552 scope.go:117] "RemoveContainer" containerID="b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f" Mar 20 16:21:10 crc kubenswrapper[3552]: E0320 16:21:10.627832 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: 
Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.627873 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f"} err="failed to get container status \"b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f\": rpc error: code = NotFound desc = could not find container \"b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f\": container with ID starting with b68a9664f51a4b8205c5cdedc7659dae306672390bb493bbf42432e982e4f08f not found: ID does not exist"
Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.627883 3552 scope.go:117] "RemoveContainer" containerID="096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969"
Mar 20 16:21:10 crc kubenswrapper[3552]: E0320 16:21:10.628307 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969\": container with ID starting with 096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969 not found: ID does not exist" containerID="096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969"
Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.628363 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969"} err="failed to get container status \"096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969\": rpc error: code = NotFound desc = could not find container \"096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969\": container with ID starting with 096b1daffbf852e1abe0dedf794a548fb9c662df784e5aabfcf842dc2e6e3969 not found: ID does not exist"
Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.728594 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad977863-c3ec-4304-a7f5-6fadc5370124" (UID: "ad977863-c3ec-4304-a7f5-6fadc5370124"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
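The "ContainerStatus from runtime service failed ... NotFound" pairs above are benign: the kubelet re-requests deletion of containers the runtime has already removed, and the NotFound gRPC status is logged and then ignored. A minimal sketch of treating NotFound as success in an idempotent delete, with removeContainer as a hypothetical stand-in for the CRI call, not the kubelet's actual code:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer is a hypothetical stand-in for a CRI RemoveContainer
    // RPC; here it always reports the container as already gone.
    func removeContainer(id string) error {
        return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    func main() {
        err := removeContainer("example-container-id")
        // Deletion is idempotent: a NotFound answer means the desired
        // state (container gone) already holds, so it is not an error.
        if status.Code(err) == codes.NotFound {
            fmt.Println("already removed, nothing to do")
            return
        }
        if err != nil {
            panic(err)
        }
    }
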
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:21:10 crc kubenswrapper[3552]: I0320 16:21:10.757447 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad977863-c3ec-4304-a7f5-6fadc5370124-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:21:11 crc kubenswrapper[3552]: I0320 16:21:11.013744 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8sfrh"] Mar 20 16:21:11 crc kubenswrapper[3552]: I0320 16:21:11.022943 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8sfrh"] Mar 20 16:21:11 crc kubenswrapper[3552]: I0320 16:21:11.465761 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" path="/var/lib/kubelet/pods/ad977863-c3ec-4304-a7f5-6fadc5370124/volumes" Mar 20 16:21:18 crc kubenswrapper[3552]: I0320 16:21:18.431501 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:21:18 crc kubenswrapper[3552]: E0320 16:21:18.432344 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:21:31 crc kubenswrapper[3552]: I0320 16:21:31.441926 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:21:31 crc kubenswrapper[3552]: E0320 16:21:31.449483 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:21:44 crc kubenswrapper[3552]: I0320 16:21:44.431123 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:21:44 crc kubenswrapper[3552]: E0320 16:21:44.432319 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:21:57 crc kubenswrapper[3552]: I0320 16:21:57.430647 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:21:57 crc kubenswrapper[3552]: E0320 16:21:57.431670 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:22:01 crc kubenswrapper[3552]: I0320 16:22:01.455295 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:22:01 crc kubenswrapper[3552]: I0320 16:22:01.456326 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:22:01 crc kubenswrapper[3552]: I0320 16:22:01.456442 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:22:01 crc kubenswrapper[3552]: I0320 16:22:01.456504 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:22:01 crc kubenswrapper[3552]: I0320 16:22:01.456559 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:22:09 crc kubenswrapper[3552]: I0320 16:22:09.431457 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:22:09 crc kubenswrapper[3552]: E0320 16:22:09.433005 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:22:21 crc kubenswrapper[3552]: I0320 16:22:21.435278 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5" Mar 20 16:22:22 crc kubenswrapper[3552]: I0320 16:22:22.037590 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"85cd2649beefd6061eba43bc624a5e42be1fe28d6dbbc476102b7dfeb2c23cb5"} Mar 20 16:23:01 crc kubenswrapper[3552]: I0320 16:23:01.459713 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:23:01 crc kubenswrapper[3552]: I0320 16:23:01.460291 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:23:01 crc kubenswrapper[3552]: I0320 16:23:01.460318 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:23:01 crc kubenswrapper[3552]: I0320 16:23:01.460373 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:23:01 crc kubenswrapper[3552]: I0320 16:23:01.460455 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:24:01 crc kubenswrapper[3552]: I0320 16:24:01.461393 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:24:01 crc kubenswrapper[3552]: I0320 16:24:01.462018 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:24:01 crc kubenswrapper[3552]: I0320 
Mar 20 16:24:01 crc kubenswrapper[3552]: I0320 16:24:01.462067 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:24:01 crc kubenswrapper[3552]: I0320 16:24:01.462086 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:24:42 crc kubenswrapper[3552]: I0320 16:24:42.778667 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 16:24:42 crc kubenswrapper[3552]: I0320 16:24:42.779324 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 16:25:01 crc kubenswrapper[3552]: I0320 16:25:01.463480 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:25:01 crc kubenswrapper[3552]: I0320 16:25:01.464025 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:25:01 crc kubenswrapper[3552]: I0320 16:25:01.464064 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:25:01 crc kubenswrapper[3552]: I0320 16:25:01.464100 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:25:01 crc kubenswrapper[3552]: I0320 16:25:01.464162 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:25:12 crc kubenswrapper[3552]: I0320 16:25:12.778391 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 16:25:12 crc kubenswrapper[3552]: I0320 16:25:12.779638 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.059174 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ttgwf"]
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.059943 3552 topology_manager.go:215] "Topology Admit Handler" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" podNamespace="openshift-marketplace" podName="redhat-operators-ttgwf"
Mar 20 16:25:29 crc kubenswrapper[3552]: E0320 16:25:29.060265 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="extract-content"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.060278 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="extract-content"
Mar 20 16:25:29 crc kubenswrapper[3552]: E0320 16:25:29.060299 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="extract-utilities"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.060308 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="extract-utilities"
Mar 20 16:25:29 crc kubenswrapper[3552]: E0320 16:25:29.060340 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="registry-server"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.060349 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="registry-server"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.060671 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad977863-c3ec-4304-a7f5-6fadc5370124" containerName="registry-server"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.062289 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ttgwf"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.081801 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ttgwf"]
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.181485 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-catalog-content\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.181580 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-utilities\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.181669 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6479\" (UniqueName: \"kubernetes.io/projected/7b7b5a6c-2925-4209-9af4-5c1b497b914e-kube-api-access-q6479\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.282931 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-utilities\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf"
Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.283029 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-q6479\" (UniqueName: \"kubernetes.io/projected/7b7b5a6c-2925-4209-9af4-5c1b497b914e-kube-api-access-q6479\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf"
pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.283205 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-catalog-content\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.283615 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-utilities\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.283723 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-catalog-content\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.304539 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6479\" (UniqueName: \"kubernetes.io/projected/7b7b5a6c-2925-4209-9af4-5c1b497b914e-kube-api-access-q6479\") pod \"redhat-operators-ttgwf\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.431967 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:25:29 crc kubenswrapper[3552]: I0320 16:25:29.968957 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ttgwf"] Mar 20 16:25:30 crc kubenswrapper[3552]: I0320 16:25:30.631218 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerID="3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2" exitCode=0 Mar 20 16:25:30 crc kubenswrapper[3552]: I0320 16:25:30.631360 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerDied","Data":"3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2"} Mar 20 16:25:30 crc kubenswrapper[3552]: I0320 16:25:30.631734 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerStarted","Data":"18f3104a07462c5b981b00d0922ce1a162d0a11245116c65027a95d9f34764a1"} Mar 20 16:25:30 crc kubenswrapper[3552]: I0320 16:25:30.634103 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 16:25:31 crc kubenswrapper[3552]: I0320 16:25:31.640801 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerStarted","Data":"a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1"} Mar 20 16:25:42 crc kubenswrapper[3552]: I0320 16:25:42.779226 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
Mar 20 16:25:42 crc kubenswrapper[3552]: I0320 16:25:42.779778 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 16:25:42 crc kubenswrapper[3552]: I0320 16:25:42.779819 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 16:25:42 crc kubenswrapper[3552]: I0320 16:25:42.780785 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85cd2649beefd6061eba43bc624a5e42be1fe28d6dbbc476102b7dfeb2c23cb5"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Mar 20 16:25:42 crc kubenswrapper[3552]: I0320 16:25:42.780943 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://85cd2649beefd6061eba43bc624a5e42be1fe28d6dbbc476102b7dfeb2c23cb5" gracePeriod=600
Mar 20 16:25:43 crc kubenswrapper[3552]: I0320 16:25:43.751745 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="85cd2649beefd6061eba43bc624a5e42be1fe28d6dbbc476102b7dfeb2c23cb5" exitCode=0
Mar 20 16:25:43 crc kubenswrapper[3552]: I0320 16:25:43.751783 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"85cd2649beefd6061eba43bc624a5e42be1fe28d6dbbc476102b7dfeb2c23cb5"}
Mar 20 16:25:43 crc kubenswrapper[3552]: I0320 16:25:43.752915 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"}
Mar 20 16:25:43 crc kubenswrapper[3552]: I0320 16:25:43.752940 3552 scope.go:117] "RemoveContainer" containerID="9095322d325bbc870748f968a17c748f6cf773bb23ba0c5ea69372ef6d2e5dd5"
Mar 20 16:26:01 crc kubenswrapper[3552]: I0320 16:26:01.464486 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:26:01 crc kubenswrapper[3552]: I0320 16:26:01.465143 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:26:01 crc kubenswrapper[3552]: I0320 16:26:01.465170 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:26:01 crc kubenswrapper[3552]: I0320 16:26:01.465190 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:26:01 crc kubenswrapper[3552]: I0320 16:26:01.465210 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
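The liveness sequence above is the standard kubelet flow: the HTTP GET against 127.0.0.1:8798/health is refused, the probe is recorded as failed, and once the failure threshold is crossed the container is killed (gracePeriod=600) and restarted. A minimal sketch of the HTTP check itself, assuming the endpoint from the log and the usual rule that 2xx/3xx responses count as success; a transport error such as connection refused counts as failure:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: time.Second}
        // The endpoint probed in the log; "connection refused" surfaces
        // here as a transport error, which counts as a probe failure.
        resp, err := client.Get("http://127.0.0.1:8798/health")
        if err != nil {
            fmt.Printf("Probe failed: %v\n", err)
            return
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            fmt.Println("Probe succeeded")
        } else {
            fmt.Printf("Probe failed: status %d\n", resp.StatusCode)
        }
    }
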
pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:26:05 crc kubenswrapper[3552]: I0320 16:26:05.943110 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerID="a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1" exitCode=0 Mar 20 16:26:05 crc kubenswrapper[3552]: I0320 16:26:05.943217 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerDied","Data":"a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1"} Mar 20 16:26:07 crc kubenswrapper[3552]: I0320 16:26:07.961194 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerStarted","Data":"e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8"} Mar 20 16:26:08 crc kubenswrapper[3552]: I0320 16:26:08.002912 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ttgwf" podStartSLOduration=3.415230033 podStartE2EDuration="39.002857169s" podCreationTimestamp="2026-03-20 16:25:29 +0000 UTC" firstStartedPulling="2026-03-20 16:25:30.633801528 +0000 UTC m=+3630.327498348" lastFinishedPulling="2026-03-20 16:26:06.221428634 +0000 UTC m=+3665.915125484" observedRunningTime="2026-03-20 16:26:07.990219911 +0000 UTC m=+3667.683916741" watchObservedRunningTime="2026-03-20 16:26:08.002857169 +0000 UTC m=+3667.696554019" Mar 20 16:26:09 crc kubenswrapper[3552]: I0320 16:26:09.441445 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:26:09 crc kubenswrapper[3552]: I0320 16:26:09.441799 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:26:10 crc kubenswrapper[3552]: I0320 16:26:10.517522 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ttgwf" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="registry-server" probeResult="failure" output=< Mar 20 16:26:10 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:26:10 crc kubenswrapper[3552]: > Mar 20 16:26:11 crc kubenswrapper[3552]: E0320 16:26:11.907154 3552 upgradeaware.go:425] Error proxying data from client to backend: readfrom tcp 38.102.83.200:40854->38.102.83.200:41689: write tcp 38.102.83.200:40854->38.102.83.200:41689: write: connection reset by peer Mar 20 16:26:20 crc kubenswrapper[3552]: I0320 16:26:20.539603 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ttgwf" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="registry-server" probeResult="failure" output=< Mar 20 16:26:20 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:26:20 crc kubenswrapper[3552]: > Mar 20 16:26:29 crc kubenswrapper[3552]: I0320 16:26:29.566920 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:26:29 crc kubenswrapper[3552]: I0320 16:26:29.662554 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:26:29 crc kubenswrapper[3552]: I0320 16:26:29.707157 3552 kubelet.go:2445] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/redhat-operators-ttgwf"] Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.168644 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ttgwf" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="registry-server" containerID="cri-o://e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8" gracePeriod=2 Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.852658 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.947230 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6479\" (UniqueName: \"kubernetes.io/projected/7b7b5a6c-2925-4209-9af4-5c1b497b914e-kube-api-access-q6479\") pod \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.947721 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-catalog-content\") pod \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.947847 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-utilities\") pod \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\" (UID: \"7b7b5a6c-2925-4209-9af4-5c1b497b914e\") " Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.948573 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-utilities" (OuterVolumeSpecName: "utilities") pod "7b7b5a6c-2925-4209-9af4-5c1b497b914e" (UID: "7b7b5a6c-2925-4209-9af4-5c1b497b914e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:26:31 crc kubenswrapper[3552]: I0320 16:26:31.959941 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b7b5a6c-2925-4209-9af4-5c1b497b914e-kube-api-access-q6479" (OuterVolumeSpecName: "kube-api-access-q6479") pod "7b7b5a6c-2925-4209-9af4-5c1b497b914e" (UID: "7b7b5a6c-2925-4209-9af4-5c1b497b914e"). InnerVolumeSpecName "kube-api-access-q6479". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.050263 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-q6479\" (UniqueName: \"kubernetes.io/projected/7b7b5a6c-2925-4209-9af4-5c1b497b914e-kube-api-access-q6479\") on node \"crc\" DevicePath \"\"" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.050307 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.185773 3552 generic.go:334] "Generic (PLEG): container finished" podID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerID="e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8" exitCode=0 Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.185814 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerDied","Data":"e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8"} Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.185816 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ttgwf" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.185838 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttgwf" event={"ID":"7b7b5a6c-2925-4209-9af4-5c1b497b914e","Type":"ContainerDied","Data":"18f3104a07462c5b981b00d0922ce1a162d0a11245116c65027a95d9f34764a1"} Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.185859 3552 scope.go:117] "RemoveContainer" containerID="e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.244683 3552 scope.go:117] "RemoveContainer" containerID="a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.310988 3552 scope.go:117] "RemoveContainer" containerID="3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.359391 3552 scope.go:117] "RemoveContainer" containerID="e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8" Mar 20 16:26:32 crc kubenswrapper[3552]: E0320 16:26:32.359940 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8\": container with ID starting with e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8 not found: ID does not exist" containerID="e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.359981 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8"} err="failed to get container status \"e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8\": rpc error: code = NotFound desc = could not find container \"e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8\": container with ID starting with e72c2eb224d930f86b976c7598764b74a3eebc2577148490706ff0c5f7c8d1b8 not found: ID does not exist" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.359990 3552 scope.go:117] 
"RemoveContainer" containerID="a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1" Mar 20 16:26:32 crc kubenswrapper[3552]: E0320 16:26:32.361143 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1\": container with ID starting with a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1 not found: ID does not exist" containerID="a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.361185 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1"} err="failed to get container status \"a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1\": rpc error: code = NotFound desc = could not find container \"a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1\": container with ID starting with a125095d2269cc1aef56853a5cccea3cb2785239afa5f4d60b5b086a73c2ade1 not found: ID does not exist" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.361199 3552 scope.go:117] "RemoveContainer" containerID="3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2" Mar 20 16:26:32 crc kubenswrapper[3552]: E0320 16:26:32.361476 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2\": container with ID starting with 3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2 not found: ID does not exist" containerID="3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.361501 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2"} err="failed to get container status \"3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2\": rpc error: code = NotFound desc = could not find container \"3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2\": container with ID starting with 3f6e32518269356322c57652eb38debf4336b39d0f7f1b4bae16231c22cfa3e2 not found: ID does not exist" Mar 20 16:26:32 crc kubenswrapper[3552]: I0320 16:26:32.977076 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b7b5a6c-2925-4209-9af4-5c1b497b914e" (UID: "7b7b5a6c-2925-4209-9af4-5c1b497b914e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:26:33 crc kubenswrapper[3552]: I0320 16:26:33.070307 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b7b5a6c-2925-4209-9af4-5c1b497b914e-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:26:33 crc kubenswrapper[3552]: I0320 16:26:33.125944 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ttgwf"] Mar 20 16:26:33 crc kubenswrapper[3552]: I0320 16:26:33.134176 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ttgwf"] Mar 20 16:26:33 crc kubenswrapper[3552]: I0320 16:26:33.454167 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" path="/var/lib/kubelet/pods/7b7b5a6c-2925-4209-9af4-5c1b497b914e/volumes" Mar 20 16:27:01 crc kubenswrapper[3552]: I0320 16:27:01.465940 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:27:01 crc kubenswrapper[3552]: I0320 16:27:01.466854 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:27:01 crc kubenswrapper[3552]: I0320 16:27:01.466959 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:27:01 crc kubenswrapper[3552]: I0320 16:27:01.467037 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:27:01 crc kubenswrapper[3552]: I0320 16:27:01.467072 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:28:01 crc kubenswrapper[3552]: I0320 16:28:01.467770 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:28:01 crc kubenswrapper[3552]: I0320 16:28:01.468479 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:28:01 crc kubenswrapper[3552]: I0320 16:28:01.468518 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:28:01 crc kubenswrapper[3552]: I0320 16:28:01.468578 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:28:01 crc kubenswrapper[3552]: I0320 16:28:01.468648 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:28:12 crc kubenswrapper[3552]: I0320 16:28:12.779029 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:28:12 crc kubenswrapper[3552]: I0320 16:28:12.779835 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:28:42 crc kubenswrapper[3552]: 
Mar 20 16:28:42 crc kubenswrapper[3552]: I0320 16:28:42.779522 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 16:29:01 crc kubenswrapper[3552]: I0320 16:29:01.468928 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:29:01 crc kubenswrapper[3552]: I0320 16:29:01.469611 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:29:01 crc kubenswrapper[3552]: I0320 16:29:01.469654 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:29:01 crc kubenswrapper[3552]: I0320 16:29:01.470708 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:29:01 crc kubenswrapper[3552]: I0320 16:29:01.470735 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:29:12 crc kubenswrapper[3552]: I0320 16:29:12.778288 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Mar 20 16:29:12 crc kubenswrapper[3552]: I0320 16:29:12.779003 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Mar 20 16:29:12 crc kubenswrapper[3552]: I0320 16:29:12.779047 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg"
Mar 20 16:29:12 crc kubenswrapper[3552]: I0320 16:29:12.780639 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Mar 20 16:29:12 crc kubenswrapper[3552]: I0320 16:29:12.780865 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" gracePeriod=600
Mar 20 16:29:12 crc kubenswrapper[3552]: E0320 16:29:12.956519 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:29:13 crc kubenswrapper[3552]: I0320 16:29:13.565685 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" exitCode=0 Mar 20 16:29:13 crc kubenswrapper[3552]: I0320 16:29:13.565746 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"} Mar 20 16:29:13 crc kubenswrapper[3552]: I0320 16:29:13.565801 3552 scope.go:117] "RemoveContainer" containerID="85cd2649beefd6061eba43bc624a5e42be1fe28d6dbbc476102b7dfeb2c23cb5" Mar 20 16:29:13 crc kubenswrapper[3552]: I0320 16:29:13.566743 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:29:13 crc kubenswrapper[3552]: E0320 16:29:13.567518 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:29:25 crc kubenswrapper[3552]: I0320 16:29:25.432273 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:29:25 crc kubenswrapper[3552]: E0320 16:29:25.433490 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:29:40 crc kubenswrapper[3552]: I0320 16:29:40.433782 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:29:40 crc kubenswrapper[3552]: E0320 16:29:40.435300 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:29:51 crc kubenswrapper[3552]: I0320 16:29:51.436198 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:29:51 crc kubenswrapper[3552]: E0320 16:29:51.437684 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.167180 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg"]
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.167790 3552 topology_manager.go:215] "Topology Admit Handler" podUID="db93172c-03a4-4caf-b915-63ecd3f82f75" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29567070-bbklg"
Mar 20 16:30:00 crc kubenswrapper[3552]: E0320 16:30:00.168068 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="registry-server"
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.168087 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="registry-server"
Mar 20 16:30:00 crc kubenswrapper[3552]: E0320 16:30:00.168110 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="extract-utilities"
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.168117 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="extract-utilities"
Mar 20 16:30:00 crc kubenswrapper[3552]: E0320 16:30:00.168143 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="extract-content"
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.168149 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="extract-content"
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.168321 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b7b5a6c-2925-4209-9af4-5c1b497b914e" containerName="registry-server"
Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.168946 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.175481 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.175486 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.179831 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg"] Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.204245 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnpjf\" (UniqueName: \"kubernetes.io/projected/db93172c-03a4-4caf-b915-63ecd3f82f75-kube-api-access-rnpjf\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.204687 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db93172c-03a4-4caf-b915-63ecd3f82f75-config-volume\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.204772 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db93172c-03a4-4caf-b915-63ecd3f82f75-secret-volume\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.306017 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db93172c-03a4-4caf-b915-63ecd3f82f75-config-volume\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.306103 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db93172c-03a4-4caf-b915-63ecd3f82f75-secret-volume\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.306313 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rnpjf\" (UniqueName: \"kubernetes.io/projected/db93172c-03a4-4caf-b915-63ecd3f82f75-kube-api-access-rnpjf\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.307308 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db93172c-03a4-4caf-b915-63ecd3f82f75-config-volume\") pod 
\"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.312310 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db93172c-03a4-4caf-b915-63ecd3f82f75-secret-volume\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.339069 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnpjf\" (UniqueName: \"kubernetes.io/projected/db93172c-03a4-4caf-b915-63ecd3f82f75-kube-api-access-rnpjf\") pod \"collect-profiles-29567070-bbklg\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.494763 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.955345 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg"] Mar 20 16:30:00 crc kubenswrapper[3552]: I0320 16:30:00.974387 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" event={"ID":"db93172c-03a4-4caf-b915-63ecd3f82f75","Type":"ContainerStarted","Data":"2df5fb97178244b51607916f82e9c6e0ac10651a79b22ace3426dd3c4ce4da09"} Mar 20 16:30:01 crc kubenswrapper[3552]: I0320 16:30:01.471012 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:30:01 crc kubenswrapper[3552]: I0320 16:30:01.471327 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:30:01 crc kubenswrapper[3552]: I0320 16:30:01.471355 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:30:01 crc kubenswrapper[3552]: I0320 16:30:01.471374 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:30:01 crc kubenswrapper[3552]: I0320 16:30:01.471446 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:30:01 crc kubenswrapper[3552]: I0320 16:30:01.986544 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" event={"ID":"db93172c-03a4-4caf-b915-63ecd3f82f75","Type":"ContainerStarted","Data":"b581f10c6a039daae067f59e5f22616f27ee0eb285355d16ca718c58c03bd170"} Mar 20 16:30:02 crc kubenswrapper[3552]: I0320 16:30:02.042681 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" podStartSLOduration=2.042638031 podStartE2EDuration="2.042638031s" podCreationTimestamp="2026-03-20 16:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 16:30:02.026884811 +0000 UTC m=+3901.720581641" 
watchObservedRunningTime="2026-03-20 16:30:02.042638031 +0000 UTC m=+3901.736334861" Mar 20 16:30:03 crc kubenswrapper[3552]: I0320 16:30:03.003266 3552 generic.go:334] "Generic (PLEG): container finished" podID="db93172c-03a4-4caf-b915-63ecd3f82f75" containerID="b581f10c6a039daae067f59e5f22616f27ee0eb285355d16ca718c58c03bd170" exitCode=0 Mar 20 16:30:03 crc kubenswrapper[3552]: I0320 16:30:03.003311 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" event={"ID":"db93172c-03a4-4caf-b915-63ecd3f82f75","Type":"ContainerDied","Data":"b581f10c6a039daae067f59e5f22616f27ee0eb285355d16ca718c58c03bd170"} Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.438947 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.539109 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db93172c-03a4-4caf-b915-63ecd3f82f75-config-volume\") pod \"db93172c-03a4-4caf-b915-63ecd3f82f75\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.539190 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnpjf\" (UniqueName: \"kubernetes.io/projected/db93172c-03a4-4caf-b915-63ecd3f82f75-kube-api-access-rnpjf\") pod \"db93172c-03a4-4caf-b915-63ecd3f82f75\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.539296 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db93172c-03a4-4caf-b915-63ecd3f82f75-secret-volume\") pod \"db93172c-03a4-4caf-b915-63ecd3f82f75\" (UID: \"db93172c-03a4-4caf-b915-63ecd3f82f75\") " Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.540102 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db93172c-03a4-4caf-b915-63ecd3f82f75-config-volume" (OuterVolumeSpecName: "config-volume") pod "db93172c-03a4-4caf-b915-63ecd3f82f75" (UID: "db93172c-03a4-4caf-b915-63ecd3f82f75"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.548274 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db93172c-03a4-4caf-b915-63ecd3f82f75-kube-api-access-rnpjf" (OuterVolumeSpecName: "kube-api-access-rnpjf") pod "db93172c-03a4-4caf-b915-63ecd3f82f75" (UID: "db93172c-03a4-4caf-b915-63ecd3f82f75"). InnerVolumeSpecName "kube-api-access-rnpjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.548852 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db93172c-03a4-4caf-b915-63ecd3f82f75-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "db93172c-03a4-4caf-b915-63ecd3f82f75" (UID: "db93172c-03a4-4caf-b915-63ecd3f82f75"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.641392 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rnpjf\" (UniqueName: \"kubernetes.io/projected/db93172c-03a4-4caf-b915-63ecd3f82f75-kube-api-access-rnpjf\") on node \"crc\" DevicePath \"\"" Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.641445 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db93172c-03a4-4caf-b915-63ecd3f82f75-secret-volume\") on node \"crc\" DevicePath \"\"" Mar 20 16:30:04 crc kubenswrapper[3552]: I0320 16:30:04.641455 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db93172c-03a4-4caf-b915-63ecd3f82f75-config-volume\") on node \"crc\" DevicePath \"\"" Mar 20 16:30:05 crc kubenswrapper[3552]: I0320 16:30:05.022807 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" Mar 20 16:30:05 crc kubenswrapper[3552]: I0320 16:30:05.022738 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567070-bbklg" event={"ID":"db93172c-03a4-4caf-b915-63ecd3f82f75","Type":"ContainerDied","Data":"2df5fb97178244b51607916f82e9c6e0ac10651a79b22ace3426dd3c4ce4da09"} Mar 20 16:30:05 crc kubenswrapper[3552]: I0320 16:30:05.032556 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2df5fb97178244b51607916f82e9c6e0ac10651a79b22ace3426dd3c4ce4da09" Mar 20 16:30:05 crc kubenswrapper[3552]: I0320 16:30:05.431038 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:30:05 crc kubenswrapper[3552]: E0320 16:30:05.431669 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:30:05 crc kubenswrapper[3552]: I0320 16:30:05.518136 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"] Mar 20 16:30:05 crc kubenswrapper[3552]: I0320 16:30:05.528036 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567025-9vx52"] Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.056114 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ghcpv"] Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.056593 3552 topology_manager.go:215] "Topology Admit Handler" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" podNamespace="openshift-marketplace" podName="certified-operators-ghcpv" Mar 20 16:30:07 crc kubenswrapper[3552]: E0320 16:30:07.056863 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="db93172c-03a4-4caf-b915-63ecd3f82f75" containerName="collect-profiles" Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.056877 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="db93172c-03a4-4caf-b915-63ecd3f82f75" containerName="collect-profiles" Mar 20 16:30:07 crc kubenswrapper[3552]: 
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.057108 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="db93172c-03a4-4caf-b915-63ecd3f82f75" containerName="collect-profiles"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.058453 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.102081 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ghcpv"]
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.192570 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-catalog-content\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.192631 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2jfj\" (UniqueName: \"kubernetes.io/projected/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-kube-api-access-v2jfj\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.193071 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-utilities\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.294791 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-catalog-content\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.294855 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-v2jfj\" (UniqueName: \"kubernetes.io/projected/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-kube-api-access-v2jfj\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.294987 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-utilities\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.295582 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-utilities\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.295597 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-catalog-content\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.315192 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2jfj\" (UniqueName: \"kubernetes.io/projected/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-kube-api-access-v2jfj\") pod \"certified-operators-ghcpv\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") " pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.429367 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.449611 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c56d4d5-04a0-49da-8353-73a78e0775ef" path="/var/lib/kubelet/pods/9c56d4d5-04a0-49da-8353-73a78e0775ef/volumes"
Mar 20 16:30:07 crc kubenswrapper[3552]: I0320 16:30:07.886117 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ghcpv"]
Mar 20 16:30:08 crc kubenswrapper[3552]: I0320 16:30:08.049380 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerStarted","Data":"692dbf86f14854c05da1427b66779f212ae87f333556b40632090c806a4f2375"}
Mar 20 16:30:09 crc kubenswrapper[3552]: I0320 16:30:09.061252 3552 generic.go:334] "Generic (PLEG): container finished" podID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerID="2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1" exitCode=0
Mar 20 16:30:09 crc kubenswrapper[3552]: I0320 16:30:09.061573 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerDied","Data":"2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1"}
Mar 20 16:30:10 crc kubenswrapper[3552]: I0320 16:30:10.073099 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerStarted","Data":"9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91"}
Mar 20 16:30:17 crc kubenswrapper[3552]: I0320 16:30:17.124942 3552 generic.go:334] "Generic (PLEG): container finished" podID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerID="9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91" exitCode=0
Mar 20 16:30:17 crc kubenswrapper[3552]: I0320 16:30:17.125025 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerDied","Data":"9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91"}
Mar 20 16:30:18 crc kubenswrapper[3552]: I0320 16:30:18.136361 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerStarted","Data":"f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215"}
Mar 20 16:30:18 crc kubenswrapper[3552]: I0320 16:30:18.164804 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ghcpv" podStartSLOduration=2.776228278 podStartE2EDuration="11.164745398s" podCreationTimestamp="2026-03-20 16:30:07 +0000 UTC" firstStartedPulling="2026-03-20 16:30:09.063776534 +0000 UTC m=+3908.757473364" lastFinishedPulling="2026-03-20 16:30:17.452293654 +0000 UTC m=+3917.145990484" observedRunningTime="2026-03-20 16:30:18.156312283 +0000 UTC m=+3917.850009133" watchObservedRunningTime="2026-03-20 16:30:18.164745398 +0000 UTC m=+3917.858442238"
Mar 20 16:30:18 crc kubenswrapper[3552]: I0320 16:30:18.430997 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:30:18 crc kubenswrapper[3552]: E0320 16:30:18.431493 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:30:27 crc kubenswrapper[3552]: I0320 16:30:27.439258 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:27 crc kubenswrapper[3552]: I0320 16:30:27.439977 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:27 crc kubenswrapper[3552]: I0320 16:30:27.458820 3552 scope.go:117] "RemoveContainer" containerID="a31837bebbee7da5d08b021f0e6c8e73fb7abf5edb9f3b9789c246773818cfb7"
Mar 20 16:30:27 crc kubenswrapper[3552]: I0320 16:30:27.524010 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:28 crc kubenswrapper[3552]: I0320 16:30:28.299826 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:28 crc kubenswrapper[3552]: I0320 16:30:28.361197 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ghcpv"]
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.242756 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ghcpv" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="registry-server" containerID="cri-o://f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215" gracePeriod=2
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.615453 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.653453 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-catalog-content\") pod \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") "
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.653587 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-utilities\") pod \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") "
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.653655 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2jfj\" (UniqueName: \"kubernetes.io/projected/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-kube-api-access-v2jfj\") pod \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\" (UID: \"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16\") "
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.654657 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-utilities" (OuterVolumeSpecName: "utilities") pod "dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" (UID: "dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.660246 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-kube-api-access-v2jfj" (OuterVolumeSpecName: "kube-api-access-v2jfj") pod "dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" (UID: "dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16"). InnerVolumeSpecName "kube-api-access-v2jfj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.757152 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-utilities\") on node \"crc\" DevicePath \"\""
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.757199 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-v2jfj\" (UniqueName: \"kubernetes.io/projected/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-kube-api-access-v2jfj\") on node \"crc\" DevicePath \"\""
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.932230 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" (UID: "dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Mar 20 16:30:30 crc kubenswrapper[3552]: I0320 16:30:30.961124 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16-catalog-content\") on node \"crc\" DevicePath \"\""
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.255869 3552 generic.go:334] "Generic (PLEG): container finished" podID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerID="f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215" exitCode=0
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.255914 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ghcpv"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.255961 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerDied","Data":"f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215"}
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.256316 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghcpv" event={"ID":"dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16","Type":"ContainerDied","Data":"692dbf86f14854c05da1427b66779f212ae87f333556b40632090c806a4f2375"}
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.256345 3552 scope.go:117] "RemoveContainer" containerID="f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.304941 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ghcpv"]
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.315028 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ghcpv"]
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.321945 3552 scope.go:117] "RemoveContainer" containerID="9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.362587 3552 scope.go:117] "RemoveContainer" containerID="2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.404243 3552 scope.go:117] "RemoveContainer" containerID="f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215"
Mar 20 16:30:31 crc kubenswrapper[3552]: E0320 16:30:31.406569 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215\": container with ID starting with f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215 not found: ID does not exist" containerID="f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.406623 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215"} err="failed to get container status \"f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215\": rpc error: code = NotFound desc = could not find container \"f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215\": container with ID starting with f9b371617863b217674ea18433651df4234fc1e3a6c46e39f1b4ade48fa4f215 not found: ID does not exist"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.406637 3552 scope.go:117] "RemoveContainer" containerID="9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91"
Mar 20 16:30:31 crc kubenswrapper[3552]: E0320 16:30:31.407425 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91\": container with ID starting with 9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91 not found: ID does not exist" containerID="9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.407511 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91"} err="failed to get container status \"9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91\": rpc error: code = NotFound desc = could not find container \"9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91\": container with ID starting with 9257e4f9572fb2cf6c7eb21a65176c5ba356855900a6805a7d4bc5b427d18f91 not found: ID does not exist"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.407535 3552 scope.go:117] "RemoveContainer" containerID="2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1"
Mar 20 16:30:31 crc kubenswrapper[3552]: E0320 16:30:31.408028 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1\": container with ID starting with 2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1 not found: ID does not exist" containerID="2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.408061 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1"} err="failed to get container status \"2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1\": rpc error: code = NotFound desc = could not find container \"2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1\": container with ID starting with 2af5c13e85e5a5b60a637aee93f6a7bbabedf47190ea50bae868d463c08f93e1 not found: ID does not exist"
Mar 20 16:30:31 crc kubenswrapper[3552]: I0320 16:30:31.440785 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" path="/var/lib/kubelet/pods/dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16/volumes"
Mar 20 16:30:33 crc kubenswrapper[3552]: I0320 16:30:33.431329 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:30:33 crc kubenswrapper[3552]: E0320 16:30:33.432543 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:30:44 crc kubenswrapper[3552]: I0320 16:30:44.431172 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:30:44 crc kubenswrapper[3552]: E0320 16:30:44.433424 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:30:57 crc kubenswrapper[3552]: I0320 16:30:57.431342 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:30:57 crc kubenswrapper[3552]: E0320 16:30:57.432730 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:31:01 crc kubenswrapper[3552]: I0320 16:31:01.471880 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:31:01 crc kubenswrapper[3552]: I0320 16:31:01.472470 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:31:01 crc kubenswrapper[3552]: I0320 16:31:01.472495 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:31:01 crc kubenswrapper[3552]: I0320 16:31:01.472544 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:31:01 crc kubenswrapper[3552]: I0320 16:31:01.472592 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:31:08 crc kubenswrapper[3552]: I0320 16:31:08.430712 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:31:08 crc kubenswrapper[3552]: E0320 16:31:08.431963 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:31:22 crc kubenswrapper[3552]: I0320 16:31:22.430199 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:31:22 crc kubenswrapper[3552]: E0320 16:31:22.431443 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:31:33 crc kubenswrapper[3552]: I0320 16:31:33.437969 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:31:33 crc kubenswrapper[3552]: E0320 16:31:33.439506 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:31:47 crc kubenswrapper[3552]: I0320 16:31:47.431685 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:31:47 crc kubenswrapper[3552]: E0320 16:31:47.433182 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:32:01 crc kubenswrapper[3552]: I0320 16:32:01.436842 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:32:01 crc kubenswrapper[3552]: E0320 16:32:01.438112 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:32:01 crc kubenswrapper[3552]: I0320 16:32:01.472895 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:32:01 crc kubenswrapper[3552]: I0320 16:32:01.473067 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:32:01 crc kubenswrapper[3552]: I0320 16:32:01.473129 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:32:01 crc kubenswrapper[3552]: I0320 16:32:01.473184 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:32:01 crc kubenswrapper[3552]: I0320 16:32:01.473279 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:32:13 crc kubenswrapper[3552]: I0320 16:32:13.435126 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:32:13 crc kubenswrapper[3552]: E0320 16:32:13.437129 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.498807 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dfxdg"]
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.499631 3552 topology_manager.go:215] "Topology Admit Handler" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" podNamespace="openshift-marketplace" podName="redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: E0320 16:32:25.499980 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="extract-content"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.499995 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="extract-content"
Mar 20 16:32:25 crc kubenswrapper[3552]: E0320 16:32:25.500029 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="registry-server"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.500040 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="registry-server"
Mar 20 16:32:25 crc kubenswrapper[3552]: E0320 16:32:25.500059 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="extract-utilities"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.500069 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="extract-utilities"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.500344 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="dac9fd1d-77d3-4ec0-bfc9-3ccabd938f16" containerName="registry-server"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.502457 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.534249 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfxdg"]
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.660397 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-utilities\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.660814 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-catalog-content\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.660952 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj787\" (UniqueName: \"kubernetes.io/projected/3abb33eb-4377-47b6-b653-6f0a7c826853-kube-api-access-rj787\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.762535 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-utilities\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.762603 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-catalog-content\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.762653 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-rj787\" (UniqueName: \"kubernetes.io/projected/3abb33eb-4377-47b6-b653-6f0a7c826853-kube-api-access-rj787\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.763169 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-utilities\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.763224 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-catalog-content\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.789744 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj787\" (UniqueName: \"kubernetes.io/projected/3abb33eb-4377-47b6-b653-6f0a7c826853-kube-api-access-rj787\") pod \"redhat-marketplace-dfxdg\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:25 crc kubenswrapper[3552]: I0320 16:32:25.846469 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfxdg"
Mar 20 16:32:26 crc kubenswrapper[3552]: I0320 16:32:26.344923 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfxdg"]
Mar 20 16:32:26 crc kubenswrapper[3552]: I0320 16:32:26.698229 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerStarted","Data":"dda0b59f8be8043e72b9d392f1fc9df7da12cc753f7ead3ff768777487b2304e"}
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.279648 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rvfc2"]
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.280124 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" podNamespace="openshift-marketplace" podName="community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.282383 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.289911 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rvfc2"]
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.395347 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-catalog-content\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.396000 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-utilities\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.396076 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5ltb\" (UniqueName: \"kubernetes.io/projected/c9550f7b-cbf9-4409-bc39-4f7563380369-kube-api-access-n5ltb\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.498748 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-utilities\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.499228 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-n5ltb\" (UniqueName: \"kubernetes.io/projected/c9550f7b-cbf9-4409-bc39-4f7563380369-kube-api-access-n5ltb\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.499462 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-catalog-content\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.500018 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-catalog-content\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.500535 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-utilities\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.528021 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5ltb\" (UniqueName: \"kubernetes.io/projected/c9550f7b-cbf9-4409-bc39-4f7563380369-kube-api-access-n5ltb\") pod \"community-operators-rvfc2\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.605281 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rvfc2"
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.712000 3552 generic.go:334] "Generic (PLEG): container finished" podID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerID="702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645" exitCode=0
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.712042 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerDied","Data":"702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645"}
Mar 20 16:32:27 crc kubenswrapper[3552]: I0320 16:32:27.714168 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Mar 20 16:32:28 crc kubenswrapper[3552]: I0320 16:32:28.129165 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rvfc2"]
Mar 20 16:32:28 crc kubenswrapper[3552]: W0320 16:32:28.135630 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9550f7b_cbf9_4409_bc39_4f7563380369.slice/crio-f3b66b96d8c4c2b6eb0da6acaddf02781f08f532211b9b1658407eed0eb05fd5 WatchSource:0}: Error finding container f3b66b96d8c4c2b6eb0da6acaddf02781f08f532211b9b1658407eed0eb05fd5: Status 404 returned error can't find the container with id f3b66b96d8c4c2b6eb0da6acaddf02781f08f532211b9b1658407eed0eb05fd5
Mar 20 16:32:28 crc kubenswrapper[3552]: I0320 16:32:28.430728 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:32:28 crc kubenswrapper[3552]: E0320 16:32:28.431244 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589"
Mar 20 16:32:28 crc kubenswrapper[3552]: I0320 16:32:28.724313 3552 generic.go:334] "Generic (PLEG): container finished" podID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerID="e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088" exitCode=0
Mar 20 16:32:28 crc kubenswrapper[3552]: I0320 16:32:28.724527 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerDied","Data":"e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088"}
Mar 20 16:32:28 crc kubenswrapper[3552]: I0320 16:32:28.726030 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerStarted","Data":"f3b66b96d8c4c2b6eb0da6acaddf02781f08f532211b9b1658407eed0eb05fd5"}
Mar 20 16:32:29 crc kubenswrapper[3552]: I0320 16:32:29.734336 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerStarted","Data":"1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7"}
pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerStarted","Data":"db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093"} Mar 20 16:32:36 crc kubenswrapper[3552]: I0320 16:32:36.785369 3552 generic.go:334] "Generic (PLEG): container finished" podID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerID="db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093" exitCode=0 Mar 20 16:32:36 crc kubenswrapper[3552]: I0320 16:32:36.785516 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerDied","Data":"db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093"} Mar 20 16:32:37 crc kubenswrapper[3552]: I0320 16:32:37.797368 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerStarted","Data":"406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b"} Mar 20 16:32:37 crc kubenswrapper[3552]: I0320 16:32:37.819176 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dfxdg" podStartSLOduration=3.401322688 podStartE2EDuration="12.819123156s" podCreationTimestamp="2026-03-20 16:32:25 +0000 UTC" firstStartedPulling="2026-03-20 16:32:27.713895521 +0000 UTC m=+4047.407592351" lastFinishedPulling="2026-03-20 16:32:37.131695989 +0000 UTC m=+4056.825392819" observedRunningTime="2026-03-20 16:32:37.81214444 +0000 UTC m=+4057.505841290" watchObservedRunningTime="2026-03-20 16:32:37.819123156 +0000 UTC m=+4057.512819996" Mar 20 16:32:41 crc kubenswrapper[3552]: I0320 16:32:41.439001 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:32:41 crc kubenswrapper[3552]: E0320 16:32:41.440862 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:32:44 crc kubenswrapper[3552]: I0320 16:32:44.874658 3552 generic.go:334] "Generic (PLEG): container finished" podID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerID="1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7" exitCode=0 Mar 20 16:32:44 crc kubenswrapper[3552]: I0320 16:32:44.874692 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerDied","Data":"1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7"} Mar 20 16:32:45 crc kubenswrapper[3552]: I0320 16:32:45.846861 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dfxdg" Mar 20 16:32:45 crc kubenswrapper[3552]: I0320 16:32:45.847269 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dfxdg" Mar 20 16:32:45 crc kubenswrapper[3552]: I0320 16:32:45.888835 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" 
event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerStarted","Data":"86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26"} Mar 20 16:32:45 crc kubenswrapper[3552]: I0320 16:32:45.914214 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rvfc2" podStartSLOduration=2.498519876 podStartE2EDuration="18.914159965s" podCreationTimestamp="2026-03-20 16:32:27 +0000 UTC" firstStartedPulling="2026-03-20 16:32:28.725873077 +0000 UTC m=+4048.419569907" lastFinishedPulling="2026-03-20 16:32:45.141513166 +0000 UTC m=+4064.835209996" observedRunningTime="2026-03-20 16:32:45.903058469 +0000 UTC m=+4065.596755339" watchObservedRunningTime="2026-03-20 16:32:45.914159965 +0000 UTC m=+4065.607856805" Mar 20 16:32:45 crc kubenswrapper[3552]: I0320 16:32:45.938375 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dfxdg" Mar 20 16:32:46 crc kubenswrapper[3552]: I0320 16:32:46.041647 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dfxdg" Mar 20 16:32:46 crc kubenswrapper[3552]: I0320 16:32:46.078857 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfxdg"] Mar 20 16:32:47 crc kubenswrapper[3552]: I0320 16:32:47.605443 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rvfc2" Mar 20 16:32:47 crc kubenswrapper[3552]: I0320 16:32:47.605766 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rvfc2" Mar 20 16:32:47 crc kubenswrapper[3552]: I0320 16:32:47.904887 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dfxdg" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="registry-server" containerID="cri-o://406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b" gracePeriod=2 Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.464649 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfxdg" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.549500 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj787\" (UniqueName: \"kubernetes.io/projected/3abb33eb-4377-47b6-b653-6f0a7c826853-kube-api-access-rj787\") pod \"3abb33eb-4377-47b6-b653-6f0a7c826853\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.549572 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-catalog-content\") pod \"3abb33eb-4377-47b6-b653-6f0a7c826853\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.549830 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-utilities\") pod \"3abb33eb-4377-47b6-b653-6f0a7c826853\" (UID: \"3abb33eb-4377-47b6-b653-6f0a7c826853\") " Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.552349 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-utilities" (OuterVolumeSpecName: "utilities") pod "3abb33eb-4377-47b6-b653-6f0a7c826853" (UID: "3abb33eb-4377-47b6-b653-6f0a7c826853"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.559490 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3abb33eb-4377-47b6-b653-6f0a7c826853-kube-api-access-rj787" (OuterVolumeSpecName: "kube-api-access-rj787") pod "3abb33eb-4377-47b6-b653-6f0a7c826853" (UID: "3abb33eb-4377-47b6-b653-6f0a7c826853"). InnerVolumeSpecName "kube-api-access-rj787". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.652688 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-rj787\" (UniqueName: \"kubernetes.io/projected/3abb33eb-4377-47b6-b653-6f0a7c826853-kube-api-access-rj787\") on node \"crc\" DevicePath \"\"" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.652728 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.677945 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3abb33eb-4377-47b6-b653-6f0a7c826853" (UID: "3abb33eb-4377-47b6-b653-6f0a7c826853"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.680085 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-rvfc2" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="registry-server" probeResult="failure" output=< Mar 20 16:32:48 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:32:48 crc kubenswrapper[3552]: > Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.754593 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3abb33eb-4377-47b6-b653-6f0a7c826853-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.919957 3552 generic.go:334] "Generic (PLEG): container finished" podID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerID="406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b" exitCode=0 Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.920003 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerDied","Data":"406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b"} Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.920031 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfxdg" event={"ID":"3abb33eb-4377-47b6-b653-6f0a7c826853","Type":"ContainerDied","Data":"dda0b59f8be8043e72b9d392f1fc9df7da12cc753f7ead3ff768777487b2304e"} Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.920049 3552 scope.go:117] "RemoveContainer" containerID="406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.920097 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfxdg" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.965051 3552 scope.go:117] "RemoveContainer" containerID="db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093" Mar 20 16:32:48 crc kubenswrapper[3552]: I0320 16:32:48.983981 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfxdg"] Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.006010 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfxdg"] Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.023001 3552 scope.go:117] "RemoveContainer" containerID="702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.101421 3552 scope.go:117] "RemoveContainer" containerID="406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b" Mar 20 16:32:49 crc kubenswrapper[3552]: E0320 16:32:49.102159 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b\": container with ID starting with 406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b not found: ID does not exist" containerID="406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.102226 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b"} err="failed to get container status \"406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b\": rpc error: code = NotFound desc = could not find container \"406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b\": container with ID starting with 406f0911fbfd88863a5661d5c3a42d0c5159a4fa1186977223d22956c35a075b not found: ID does not exist" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.102248 3552 scope.go:117] "RemoveContainer" containerID="db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093" Mar 20 16:32:49 crc kubenswrapper[3552]: E0320 16:32:49.102674 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093\": container with ID starting with db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093 not found: ID does not exist" containerID="db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.102744 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093"} err="failed to get container status \"db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093\": rpc error: code = NotFound desc = could not find container \"db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093\": container with ID starting with db784fd240cdfde084d4e31f9966b500fbed11bf1b5560aa3e3a760c128c1093 not found: ID does not exist" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.102758 3552 scope.go:117] "RemoveContainer" containerID="702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645" Mar 20 16:32:49 crc kubenswrapper[3552]: E0320 16:32:49.103754 3552 remote_runtime.go:432] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645\": container with ID starting with 702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645 not found: ID does not exist" containerID="702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.103786 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645"} err="failed to get container status \"702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645\": rpc error: code = NotFound desc = could not find container \"702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645\": container with ID starting with 702c0138320aecb80e3fb2c605290c6dead603c3217723972bc11eb9cf6f7645 not found: ID does not exist" Mar 20 16:32:49 crc kubenswrapper[3552]: I0320 16:32:49.441235 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" path="/var/lib/kubelet/pods/3abb33eb-4377-47b6-b653-6f0a7c826853/volumes" Mar 20 16:32:55 crc kubenswrapper[3552]: I0320 16:32:55.432609 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:32:55 crc kubenswrapper[3552]: E0320 16:32:55.433855 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:32:57 crc kubenswrapper[3552]: I0320 16:32:57.698559 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rvfc2" Mar 20 16:32:57 crc kubenswrapper[3552]: I0320 16:32:57.798083 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rvfc2" Mar 20 16:32:57 crc kubenswrapper[3552]: I0320 16:32:57.836113 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rvfc2"] Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.023171 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rvfc2" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="registry-server" containerID="cri-o://86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26" gracePeriod=2 Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.545567 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rvfc2" Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.665171 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-catalog-content\") pod \"c9550f7b-cbf9-4409-bc39-4f7563380369\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.665359 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5ltb\" (UniqueName: \"kubernetes.io/projected/c9550f7b-cbf9-4409-bc39-4f7563380369-kube-api-access-n5ltb\") pod \"c9550f7b-cbf9-4409-bc39-4f7563380369\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.665395 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-utilities\") pod \"c9550f7b-cbf9-4409-bc39-4f7563380369\" (UID: \"c9550f7b-cbf9-4409-bc39-4f7563380369\") " Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.666065 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-utilities" (OuterVolumeSpecName: "utilities") pod "c9550f7b-cbf9-4409-bc39-4f7563380369" (UID: "c9550f7b-cbf9-4409-bc39-4f7563380369"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.671012 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9550f7b-cbf9-4409-bc39-4f7563380369-kube-api-access-n5ltb" (OuterVolumeSpecName: "kube-api-access-n5ltb") pod "c9550f7b-cbf9-4409-bc39-4f7563380369" (UID: "c9550f7b-cbf9-4409-bc39-4f7563380369"). InnerVolumeSpecName "kube-api-access-n5ltb". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.768041 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-n5ltb\" (UniqueName: \"kubernetes.io/projected/c9550f7b-cbf9-4409-bc39-4f7563380369-kube-api-access-n5ltb\") on node \"crc\" DevicePath \"\"" Mar 20 16:32:59 crc kubenswrapper[3552]: I0320 16:32:59.768104 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.046622 3552 generic.go:334] "Generic (PLEG): container finished" podID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerID="86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26" exitCode=0 Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.046954 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerDied","Data":"86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26"} Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.047002 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rvfc2" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.047078 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rvfc2" event={"ID":"c9550f7b-cbf9-4409-bc39-4f7563380369","Type":"ContainerDied","Data":"f3b66b96d8c4c2b6eb0da6acaddf02781f08f532211b9b1658407eed0eb05fd5"} Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.047110 3552 scope.go:117] "RemoveContainer" containerID="86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.108439 3552 scope.go:117] "RemoveContainer" containerID="1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.208239 3552 scope.go:117] "RemoveContainer" containerID="e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.245533 3552 scope.go:117] "RemoveContainer" containerID="86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26" Mar 20 16:33:00 crc kubenswrapper[3552]: E0320 16:33:00.246183 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26\": container with ID starting with 86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26 not found: ID does not exist" containerID="86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.246232 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26"} err="failed to get container status \"86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26\": rpc error: code = NotFound desc = could not find container \"86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26\": container with ID starting with 86cd4e3b5329e138d474d1cc28cac8ed5add54dc1e344552ee797848b7173b26 not found: ID does not exist" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.246252 3552 scope.go:117] "RemoveContainer" containerID="1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7" Mar 20 16:33:00 crc kubenswrapper[3552]: E0320 16:33:00.247462 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7\": container with ID starting with 1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7 not found: ID does not exist" containerID="1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.247552 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7"} err="failed to get container status \"1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7\": rpc error: code = NotFound desc = could not find container \"1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7\": container with ID starting with 1fce77dabfddb8e21628cae018e624f54b8772af5a00cae4e76346fcbbdf45d7 not found: ID does not exist" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.247576 3552 scope.go:117] "RemoveContainer" 
containerID="e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088" Mar 20 16:33:00 crc kubenswrapper[3552]: E0320 16:33:00.249195 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088\": container with ID starting with e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088 not found: ID does not exist" containerID="e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.249240 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088"} err="failed to get container status \"e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088\": rpc error: code = NotFound desc = could not find container \"e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088\": container with ID starting with e87795d94311348044cef07b38152ac76d18bf61ac2db94bc3495ab59681a088 not found: ID does not exist" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.271213 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9550f7b-cbf9-4409-bc39-4f7563380369" (UID: "c9550f7b-cbf9-4409-bc39-4f7563380369"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.279484 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9550f7b-cbf9-4409-bc39-4f7563380369-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.387428 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rvfc2"] Mar 20 16:33:00 crc kubenswrapper[3552]: I0320 16:33:00.399859 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rvfc2"] Mar 20 16:33:01 crc kubenswrapper[3552]: I0320 16:33:01.441464 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" path="/var/lib/kubelet/pods/c9550f7b-cbf9-4409-bc39-4f7563380369/volumes" Mar 20 16:33:01 crc kubenswrapper[3552]: I0320 16:33:01.473585 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:33:01 crc kubenswrapper[3552]: I0320 16:33:01.473672 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:33:01 crc kubenswrapper[3552]: I0320 16:33:01.473700 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:33:01 crc kubenswrapper[3552]: I0320 16:33:01.473725 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:33:01 crc kubenswrapper[3552]: I0320 16:33:01.473754 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:33:10 crc kubenswrapper[3552]: I0320 16:33:10.431426 3552 scope.go:117] "RemoveContainer" 
containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:33:10 crc kubenswrapper[3552]: E0320 16:33:10.432321 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:33:22 crc kubenswrapper[3552]: I0320 16:33:22.431300 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:33:22 crc kubenswrapper[3552]: E0320 16:33:22.432987 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:33:35 crc kubenswrapper[3552]: I0320 16:33:35.431472 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:33:35 crc kubenswrapper[3552]: E0320 16:33:35.432518 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:33:50 crc kubenswrapper[3552]: I0320 16:33:50.431921 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:33:50 crc kubenswrapper[3552]: E0320 16:33:50.433021 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:34:01 crc kubenswrapper[3552]: I0320 16:34:01.474182 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:34:01 crc kubenswrapper[3552]: I0320 16:34:01.475178 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:34:01 crc kubenswrapper[3552]: I0320 16:34:01.475236 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:34:01 crc kubenswrapper[3552]: I0320 16:34:01.475276 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:34:01 crc kubenswrapper[3552]: I0320 16:34:01.475395 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:34:04 crc kubenswrapper[3552]: 
I0320 16:34:04.431912 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:34:04 crc kubenswrapper[3552]: E0320 16:34:04.433173 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:34:16 crc kubenswrapper[3552]: I0320 16:34:16.431574 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6" Mar 20 16:34:16 crc kubenswrapper[3552]: I0320 16:34:16.769197 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"ffc7492bb2352219f48d04a9ff8a621ca2721375bc277e13930e1e8a6110432d"} Mar 20 16:34:19 crc kubenswrapper[3552]: I0320 16:34:19.686987 3552 dynamic_cafile_content.go:211] "Failed to remove file watch, it may have been deleted" file="/etc/kubernetes/kubelet-ca.crt" err="fsnotify: can't remove non-existent watch: /etc/kubernetes/kubelet-ca.crt" Mar 20 16:35:01 crc kubenswrapper[3552]: I0320 16:35:01.476093 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:35:01 crc kubenswrapper[3552]: I0320 16:35:01.476805 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:35:01 crc kubenswrapper[3552]: I0320 16:35:01.476843 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:35:01 crc kubenswrapper[3552]: I0320 16:35:01.476879 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:35:01 crc kubenswrapper[3552]: I0320 16:35:01.476947 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:36:01 crc kubenswrapper[3552]: I0320 16:36:01.477523 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:36:01 crc kubenswrapper[3552]: I0320 16:36:01.478242 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:36:01 crc kubenswrapper[3552]: I0320 16:36:01.478275 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:36:01 crc kubenswrapper[3552]: I0320 16:36:01.478334 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:36:01 crc kubenswrapper[3552]: I0320 16:36:01.478422 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:36:08 crc kubenswrapper[3552]: I0320 16:36:08.752396 3552 generic.go:334] "Generic (PLEG): container finished" podID="bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" containerID="36b2065160badeb7a2c0565766e1067f5beacf8fbb7931cb5ebf6fee0bfae393" exitCode=1 Mar 20 16:36:08 crc 
kubenswrapper[3552]: I0320 16:36:08.752493 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92","Type":"ContainerDied","Data":"36b2065160badeb7a2c0565766e1067f5beacf8fbb7931cb5ebf6fee0bfae393"} Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.069751 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191231 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191396 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config-secret\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191611 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-temporary\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191690 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-workdir\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191796 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ca-certs\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191890 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ssh-key\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.191961 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.192039 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gv84n\" (UniqueName: \"kubernetes.io/projected/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-kube-api-access-gv84n\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.192109 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-config-data\") pod \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\" (UID: \"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92\") " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.194629 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.202264 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-config-data" (OuterVolumeSpecName: "config-data") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.206889 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "test-operator-logs") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.215571 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-kube-api-access-gv84n" (OuterVolumeSpecName: "kube-api-access-gv84n") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "kube-api-access-gv84n". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.228334 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.232309 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.247783 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.257876 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.292064 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" (UID: "bea2de7e-3b1c-4ab2-a39f-5f9980e81f92"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.295297 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.295342 3552 reconciler_common.go:300] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.295355 3552 reconciler_common.go:300] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.295369 3552 reconciler_common.go:300] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ca-certs\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.295378 3552 reconciler_common.go:300] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-ssh-key\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.296541 3552 reconciler_common.go:293] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.296566 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-gv84n\" (UniqueName: \"kubernetes.io/projected/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-kube-api-access-gv84n\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.296580 3552 reconciler_common.go:300] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-config-data\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.296594 3552 reconciler_common.go:300] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bea2de7e-3b1c-4ab2-a39f-5f9980e81f92-openstack-config\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.333309 3552 operation_generator.go:1001] 
UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.398509 3552 reconciler_common.go:300] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.768794 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"bea2de7e-3b1c-4ab2-a39f-5f9980e81f92","Type":"ContainerDied","Data":"7a1b62a09c083f677cd53af0816a8cd66735b17332378a260fc639e19bda1657"} Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.769073 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a1b62a09c083f677cd53af0816a8cd66735b17332378a260fc639e19bda1657" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.768870 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.826477 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g894d"] Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.826689 3552 topology_manager.go:215] "Topology Admit Handler" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" podNamespace="openshift-marketplace" podName="redhat-operators-g894d" Mar 20 16:36:10 crc kubenswrapper[3552]: E0320 16:36:10.826979 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="extract-content" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.826992 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="extract-content" Mar 20 16:36:10 crc kubenswrapper[3552]: E0320 16:36:10.827014 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="extract-utilities" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827023 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="extract-utilities" Mar 20 16:36:10 crc kubenswrapper[3552]: E0320 16:36:10.827046 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="extract-utilities" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827054 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="extract-utilities" Mar 20 16:36:10 crc kubenswrapper[3552]: E0320 16:36:10.827066 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" containerName="tempest-tests-tempest-tests-runner" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827077 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" containerName="tempest-tests-tempest-tests-runner" Mar 20 16:36:10 crc kubenswrapper[3552]: E0320 16:36:10.827091 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="registry-server" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827099 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="registry-server" Mar 20 16:36:10 crc 
kubenswrapper[3552]: E0320 16:36:10.827120 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="extract-content" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827127 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="extract-content" Mar 20 16:36:10 crc kubenswrapper[3552]: E0320 16:36:10.827142 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="registry-server" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827150 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="registry-server" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827374 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="bea2de7e-3b1c-4ab2-a39f-5f9980e81f92" containerName="tempest-tests-tempest-tests-runner" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827426 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="3abb33eb-4377-47b6-b653-6f0a7c826853" containerName="registry-server" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.827438 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9550f7b-cbf9-4409-bc39-4f7563380369" containerName="registry-server" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.829154 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.837454 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g894d"] Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.907232 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-catalog-content\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.907301 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjjx8\" (UniqueName: \"kubernetes.io/projected/576a70a3-6852-4dff-ab43-0c39a342bfeb-kube-api-access-zjjx8\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:10 crc kubenswrapper[3552]: I0320 16:36:10.907723 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-utilities\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.009858 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-utilities\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.009968 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-catalog-content\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.010026 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-zjjx8\" (UniqueName: \"kubernetes.io/projected/576a70a3-6852-4dff-ab43-0c39a342bfeb-kube-api-access-zjjx8\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.010898 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-utilities\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.011332 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-catalog-content\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.026379 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjjx8\" (UniqueName: \"kubernetes.io/projected/576a70a3-6852-4dff-ab43-0c39a342bfeb-kube-api-access-zjjx8\") pod \"redhat-operators-g894d\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.142835 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.688160 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g894d"] Mar 20 16:36:11 crc kubenswrapper[3552]: I0320 16:36:11.777746 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerStarted","Data":"5685e0fc131b51c69f884b4758902a05fbd80f3d2528f58e8448914237166e8b"} Mar 20 16:36:12 crc kubenswrapper[3552]: I0320 16:36:12.785868 3552 generic.go:334] "Generic (PLEG): container finished" podID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerID="de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc" exitCode=0 Mar 20 16:36:12 crc kubenswrapper[3552]: I0320 16:36:12.785963 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerDied","Data":"de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc"} Mar 20 16:36:13 crc kubenswrapper[3552]: I0320 16:36:13.796252 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerStarted","Data":"e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0"} Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.071886 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.072559 3552 topology_manager.go:215] "Topology Admit Handler" podUID="995e0401-20ca-46f3-82bd-3b088d9413a4" podNamespace="openstack" podName="test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.073592 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.076068 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5lzdb" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.086195 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.217880 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.218450 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fclff\" (UniqueName: \"kubernetes.io/projected/995e0401-20ca-46f3-82bd-3b088d9413a4-kube-api-access-fclff\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.319932 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-fclff\" (UniqueName: \"kubernetes.io/projected/995e0401-20ca-46f3-82bd-3b088d9413a4-kube-api-access-fclff\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.320041 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.327785 3552 operation_generator.go:664] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.338335 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-fclff\" (UniqueName: \"kubernetes.io/projected/995e0401-20ca-46f3-82bd-3b088d9413a4-kube-api-access-fclff\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.377363 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"995e0401-20ca-46f3-82bd-3b088d9413a4\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc 
kubenswrapper[3552]: I0320 16:36:20.391564 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Mar 20 16:36:20 crc kubenswrapper[3552]: I0320 16:36:20.942428 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Mar 20 16:36:21 crc kubenswrapper[3552]: I0320 16:36:21.865470 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"995e0401-20ca-46f3-82bd-3b088d9413a4","Type":"ContainerStarted","Data":"00d8213f5117dcf83fe91418de355fd33c04e69ebde22ce571ef77653f5c5a9e"} Mar 20 16:36:24 crc kubenswrapper[3552]: I0320 16:36:24.890529 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"995e0401-20ca-46f3-82bd-3b088d9413a4","Type":"ContainerStarted","Data":"7c7f65353fc269dc591f63bdc7cead26250cacc411d1fd370f2115132bca23a6"} Mar 20 16:36:42 crc kubenswrapper[3552]: I0320 16:36:42.778579 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:36:42 crc kubenswrapper[3552]: I0320 16:36:42.779610 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:36:48 crc kubenswrapper[3552]: I0320 16:36:48.105542 3552 generic.go:334] "Generic (PLEG): container finished" podID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerID="e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0" exitCode=0 Mar 20 16:36:48 crc kubenswrapper[3552]: I0320 16:36:48.105655 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerDied","Data":"e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0"} Mar 20 16:36:48 crc kubenswrapper[3552]: I0320 16:36:48.127374 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=25.282900266 podStartE2EDuration="28.127324982s" podCreationTimestamp="2026-03-20 16:36:20 +0000 UTC" firstStartedPulling="2026-03-20 16:36:20.954443165 +0000 UTC m=+4280.648139995" lastFinishedPulling="2026-03-20 16:36:23.798867881 +0000 UTC m=+4283.492564711" observedRunningTime="2026-03-20 16:36:24.915828155 +0000 UTC m=+4284.609524985" watchObservedRunningTime="2026-03-20 16:36:48.127324982 +0000 UTC m=+4307.821021812" Mar 20 16:36:50 crc kubenswrapper[3552]: I0320 16:36:50.124508 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerStarted","Data":"86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310"} Mar 20 16:36:50 crc kubenswrapper[3552]: I0320 16:36:50.146778 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g894d" 
podStartSLOduration=4.315060985 podStartE2EDuration="40.146738524s" podCreationTimestamp="2026-03-20 16:36:10 +0000 UTC" firstStartedPulling="2026-03-20 16:36:12.787321295 +0000 UTC m=+4272.481018135" lastFinishedPulling="2026-03-20 16:36:48.618998844 +0000 UTC m=+4308.312695674" observedRunningTime="2026-03-20 16:36:50.140366345 +0000 UTC m=+4309.834063195" watchObservedRunningTime="2026-03-20 16:36:50.146738524 +0000 UTC m=+4309.840435344" Mar 20 16:36:51 crc kubenswrapper[3552]: I0320 16:36:51.143345 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:51 crc kubenswrapper[3552]: I0320 16:36:51.143779 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:36:52 crc kubenswrapper[3552]: I0320 16:36:52.255709 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g894d" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="registry-server" probeResult="failure" output=< Mar 20 16:36:52 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:36:52 crc kubenswrapper[3552]: > Mar 20 16:37:01 crc kubenswrapper[3552]: I0320 16:37:01.478874 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:37:01 crc kubenswrapper[3552]: I0320 16:37:01.479576 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:37:01 crc kubenswrapper[3552]: I0320 16:37:01.479621 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:37:01 crc kubenswrapper[3552]: I0320 16:37:01.479652 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:37:01 crc kubenswrapper[3552]: I0320 16:37:01.479722 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:37:02 crc kubenswrapper[3552]: I0320 16:37:02.235667 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g894d" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="registry-server" probeResult="failure" output=< Mar 20 16:37:02 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:37:02 crc kubenswrapper[3552]: > Mar 20 16:37:04 crc kubenswrapper[3552]: I0320 16:37:04.862809 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vzh4b/must-gather-ktqhf"] Mar 20 16:37:04 crc kubenswrapper[3552]: I0320 16:37:04.863781 3552 topology_manager.go:215] "Topology Admit Handler" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" podNamespace="openshift-must-gather-vzh4b" podName="must-gather-ktqhf" Mar 20 16:37:04 crc kubenswrapper[3552]: I0320 16:37:04.869691 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:04 crc kubenswrapper[3552]: I0320 16:37:04.873004 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vzh4b"/"openshift-service-ca.crt" Mar 20 16:37:04 crc kubenswrapper[3552]: I0320 16:37:04.874167 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vzh4b"/"kube-root-ca.crt" Mar 20 16:37:04 crc kubenswrapper[3552]: I0320 16:37:04.893376 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vzh4b/must-gather-ktqhf"] Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.046054 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/92ab524e-d28f-4085-971b-8741e16f9ccf-must-gather-output\") pod \"must-gather-ktqhf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.046272 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tszk\" (UniqueName: \"kubernetes.io/projected/92ab524e-d28f-4085-971b-8741e16f9ccf-kube-api-access-9tszk\") pod \"must-gather-ktqhf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.148066 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/92ab524e-d28f-4085-971b-8741e16f9ccf-must-gather-output\") pod \"must-gather-ktqhf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.148494 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9tszk\" (UniqueName: \"kubernetes.io/projected/92ab524e-d28f-4085-971b-8741e16f9ccf-kube-api-access-9tszk\") pod \"must-gather-ktqhf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.148601 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/92ab524e-d28f-4085-971b-8741e16f9ccf-must-gather-output\") pod \"must-gather-ktqhf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.171731 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tszk\" (UniqueName: \"kubernetes.io/projected/92ab524e-d28f-4085-971b-8741e16f9ccf-kube-api-access-9tszk\") pod \"must-gather-ktqhf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:05 crc kubenswrapper[3552]: I0320 16:37:05.191710 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:37:06 crc kubenswrapper[3552]: I0320 16:37:06.131674 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vzh4b/must-gather-ktqhf"] Mar 20 16:37:06 crc kubenswrapper[3552]: I0320 16:37:06.266104 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" event={"ID":"92ab524e-d28f-4085-971b-8741e16f9ccf","Type":"ContainerStarted","Data":"4341feaddd9f85d43d06d13478e3d61ee4dc95238b96700a5fefe3db48e8cd4f"} Mar 20 16:37:11 crc kubenswrapper[3552]: I0320 16:37:11.265296 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:37:11 crc kubenswrapper[3552]: I0320 16:37:11.372558 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:37:11 crc kubenswrapper[3552]: I0320 16:37:11.464091 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g894d"] Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.318490 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-g894d" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="registry-server" containerID="cri-o://86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310" gracePeriod=2 Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.753156 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g894d" Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.778146 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.778238 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.906948 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-catalog-content\") pod \"576a70a3-6852-4dff-ab43-0c39a342bfeb\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.907083 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-utilities\") pod \"576a70a3-6852-4dff-ab43-0c39a342bfeb\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.907221 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjjx8\" (UniqueName: \"kubernetes.io/projected/576a70a3-6852-4dff-ab43-0c39a342bfeb-kube-api-access-zjjx8\") pod \"576a70a3-6852-4dff-ab43-0c39a342bfeb\" (UID: \"576a70a3-6852-4dff-ab43-0c39a342bfeb\") " Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 
Mar 20 16:37:12 crc kubenswrapper[3552]: I0320 16:37:12.916197 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/576a70a3-6852-4dff-ab43-0c39a342bfeb-kube-api-access-zjjx8" (OuterVolumeSpecName: "kube-api-access-zjjx8") pod "576a70a3-6852-4dff-ab43-0c39a342bfeb" (UID: "576a70a3-6852-4dff-ab43-0c39a342bfeb"). InnerVolumeSpecName "kube-api-access-zjjx8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.009466 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-utilities\") on node \"crc\" DevicePath \"\""
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.009825 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-zjjx8\" (UniqueName: \"kubernetes.io/projected/576a70a3-6852-4dff-ab43-0c39a342bfeb-kube-api-access-zjjx8\") on node \"crc\" DevicePath \"\""
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.327890 3552 generic.go:334] "Generic (PLEG): container finished" podID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerID="86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310" exitCode=0
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.327929 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g894d"
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.327935 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerDied","Data":"86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310"}
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.327968 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g894d" event={"ID":"576a70a3-6852-4dff-ab43-0c39a342bfeb","Type":"ContainerDied","Data":"5685e0fc131b51c69f884b4758902a05fbd80f3d2528f58e8448914237166e8b"}
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.331446 3552 scope.go:117] "RemoveContainer" containerID="86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310"
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.375706 3552 scope.go:117] "RemoveContainer" containerID="e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0"
Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.889695 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "576a70a3-6852-4dff-ab43-0c39a342bfeb" (UID: "576a70a3-6852-4dff-ab43-0c39a342bfeb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.942959 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/576a70a3-6852-4dff-ab43-0c39a342bfeb-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:37:13 crc kubenswrapper[3552]: I0320 16:37:13.989389 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g894d"] Mar 20 16:37:14 crc kubenswrapper[3552]: I0320 16:37:14.001508 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-g894d"] Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.441574 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" path="/var/lib/kubelet/pods/576a70a3-6852-4dff-ab43-0c39a342bfeb/volumes" Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.626326 3552 scope.go:117] "RemoveContainer" containerID="de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc" Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.667826 3552 scope.go:117] "RemoveContainer" containerID="86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310" Mar 20 16:37:15 crc kubenswrapper[3552]: E0320 16:37:15.670169 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310\": container with ID starting with 86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310 not found: ID does not exist" containerID="86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310" Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.670233 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310"} err="failed to get container status \"86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310\": rpc error: code = NotFound desc = could not find container \"86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310\": container with ID starting with 86dc801ffcd90a3955b7910ce26f276595580213ca9423b5eb38b51848f41310 not found: ID does not exist" Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.670251 3552 scope.go:117] "RemoveContainer" containerID="e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0" Mar 20 16:37:15 crc kubenswrapper[3552]: E0320 16:37:15.670588 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0\": container with ID starting with e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0 not found: ID does not exist" containerID="e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0" Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.670617 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0"} err="failed to get container status \"e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0\": rpc error: code = NotFound desc = could not find container \"e4e3da586bd972b3547103cdcb04c0373740e989b7e37fc1a4845b7d1b5d4bc0\": container with ID starting with 
Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.670628 3552 scope.go:117] "RemoveContainer" containerID="de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc"
Mar 20 16:37:15 crc kubenswrapper[3552]: E0320 16:37:15.670842 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc\": container with ID starting with de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc not found: ID does not exist" containerID="de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc"
Mar 20 16:37:15 crc kubenswrapper[3552]: I0320 16:37:15.670861 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc"} err="failed to get container status \"de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc\": rpc error: code = NotFound desc = could not find container \"de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc\": container with ID starting with de54367522435c7ee06b349227cba76f0736de52cd2ca2b37381c398b63146dc not found: ID does not exist"
Mar 20 16:37:16 crc kubenswrapper[3552]: I0320 16:37:16.363262 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" event={"ID":"92ab524e-d28f-4085-971b-8741e16f9ccf","Type":"ContainerStarted","Data":"303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741"}
Mar 20 16:37:16 crc kubenswrapper[3552]: I0320 16:37:16.363302 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" event={"ID":"92ab524e-d28f-4085-971b-8741e16f9ccf","Type":"ContainerStarted","Data":"45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9"}
Mar 20 16:37:16 crc kubenswrapper[3552]: I0320 16:37:16.381497 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" podStartSLOduration=2.853732947 podStartE2EDuration="12.381451394s" podCreationTimestamp="2026-03-20 16:37:04 +0000 UTC" firstStartedPulling="2026-03-20 16:37:06.141325794 +0000 UTC m=+4325.835022624" lastFinishedPulling="2026-03-20 16:37:15.669044241 +0000 UTC m=+4335.362741071" observedRunningTime="2026-03-20 16:37:16.375548626 +0000 UTC m=+4336.069245456" watchObservedRunningTime="2026-03-20 16:37:16.381451394 +0000 UTC m=+4336.075148224"
Mar 20 16:37:21 crc kubenswrapper[3552]: E0320 16:37:21.253358 3552 upgradeaware.go:425] Error proxying data from client to backend: readfrom tcp 38.102.83.200:60182->38.102.83.200:41689: write tcp 38.102.83.200:60182->38.102.83.200:41689: write: broken pipe
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.373432 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-ksjbf"]
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.373825 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4532a061-43b9-4237-a717-72abcc0d31d3" podNamespace="openshift-must-gather-vzh4b" podName="crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: E0320 16:37:22.375990 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="registry-server"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.376021 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="registry-server"
Mar 20 16:37:22 crc kubenswrapper[3552]: E0320 16:37:22.376052 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="extract-utilities"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.376061 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="extract-utilities"
Mar 20 16:37:22 crc kubenswrapper[3552]: E0320 16:37:22.376080 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="extract-content"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.376089 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="extract-content"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.376332 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="576a70a3-6852-4dff-ab43-0c39a342bfeb" containerName="registry-server"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.376954 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.381822 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-must-gather-vzh4b"/"default-dockercfg-jm6ht"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.425775 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w65j\" (UniqueName: \"kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.425848 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4532a061-43b9-4237-a717-72abcc0d31d3-host\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.527443 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-9w65j\" (UniqueName: \"kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.527511 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4532a061-43b9-4237-a717-72abcc0d31d3-host\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.530049 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4532a061-43b9-4237-a717-72abcc0d31d3-host\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.550734 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w65j\" (UniqueName: \"kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
\"kube-api-access-9w65j\" (UniqueName: \"kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j\") pod \"crc-debug-ksjbf\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" Mar 20 16:37:22 crc kubenswrapper[3552]: I0320 16:37:22.699218 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" Mar 20 16:37:23 crc kubenswrapper[3552]: I0320 16:37:23.439783 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" event={"ID":"4532a061-43b9-4237-a717-72abcc0d31d3","Type":"ContainerStarted","Data":"6829ac3a405ff2cfb32ee11c3c20123f45d0b1b36e80a33a44314049fb7998c0"} Mar 20 16:37:39 crc kubenswrapper[3552]: I0320 16:37:39.610439 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" event={"ID":"4532a061-43b9-4237-a717-72abcc0d31d3","Type":"ContainerStarted","Data":"bf1257187f17874c456a745118764c3dc6b3fa43d229014ad529193d5f618e04"} Mar 20 16:37:39 crc kubenswrapper[3552]: I0320 16:37:39.632505 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" podStartSLOduration=1.668923604 podStartE2EDuration="17.632448305s" podCreationTimestamp="2026-03-20 16:37:22 +0000 UTC" firstStartedPulling="2026-03-20 16:37:22.744908672 +0000 UTC m=+4342.438605492" lastFinishedPulling="2026-03-20 16:37:38.708433363 +0000 UTC m=+4358.402130193" observedRunningTime="2026-03-20 16:37:39.623033384 +0000 UTC m=+4359.316730214" watchObservedRunningTime="2026-03-20 16:37:39.632448305 +0000 UTC m=+4359.326145145" Mar 20 16:37:42 crc kubenswrapper[3552]: I0320 16:37:42.778774 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:37:42 crc kubenswrapper[3552]: I0320 16:37:42.779268 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:37:42 crc kubenswrapper[3552]: I0320 16:37:42.779302 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 16:37:42 crc kubenswrapper[3552]: I0320 16:37:42.780266 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ffc7492bb2352219f48d04a9ff8a621ca2721375bc277e13930e1e8a6110432d"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 16:37:42 crc kubenswrapper[3552]: I0320 16:37:42.780432 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://ffc7492bb2352219f48d04a9ff8a621ca2721375bc277e13930e1e8a6110432d" gracePeriod=600 Mar 20 16:37:43 crc kubenswrapper[3552]: I0320 
Mar 20 16:37:43 crc kubenswrapper[3552]: I0320 16:37:43.644545 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"ffc7492bb2352219f48d04a9ff8a621ca2721375bc277e13930e1e8a6110432d"}
Mar 20 16:37:43 crc kubenswrapper[3552]: I0320 16:37:43.644909 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6"}
Mar 20 16:37:43 crc kubenswrapper[3552]: I0320 16:37:43.644938 3552 scope.go:117] "RemoveContainer" containerID="21d0018251af971a8cabcfdab063aa18009017f057d98b8ea545da3e0d9e2dd6"
Mar 20 16:38:01 crc kubenswrapper[3552]: I0320 16:38:01.481672 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running"
Mar 20 16:38:01 crc kubenswrapper[3552]: I0320 16:38:01.482351 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running"
Mar 20 16:38:01 crc kubenswrapper[3552]: I0320 16:38:01.482391 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running"
Mar 20 16:38:01 crc kubenswrapper[3552]: I0320 16:38:01.482445 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running"
Mar 20 16:38:01 crc kubenswrapper[3552]: I0320 16:38:01.482514 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running"
Mar 20 16:38:24 crc kubenswrapper[3552]: I0320 16:38:24.045002 3552 generic.go:334] "Generic (PLEG): container finished" podID="4532a061-43b9-4237-a717-72abcc0d31d3" containerID="bf1257187f17874c456a745118764c3dc6b3fa43d229014ad529193d5f618e04" exitCode=0
Mar 20 16:38:24 crc kubenswrapper[3552]: I0320 16:38:24.045157 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" event={"ID":"4532a061-43b9-4237-a717-72abcc0d31d3","Type":"ContainerDied","Data":"bf1257187f17874c456a745118764c3dc6b3fa43d229014ad529193d5f618e04"}
Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.082923 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf"
Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.116266 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-ksjbf"] Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.124368 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-ksjbf"] Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.161520 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4532a061-43b9-4237-a717-72abcc0d31d3-host\") pod \"4532a061-43b9-4237-a717-72abcc0d31d3\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.161651 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4532a061-43b9-4237-a717-72abcc0d31d3-host" (OuterVolumeSpecName: "host") pod "4532a061-43b9-4237-a717-72abcc0d31d3" (UID: "4532a061-43b9-4237-a717-72abcc0d31d3"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.161683 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w65j\" (UniqueName: \"kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j\") pod \"4532a061-43b9-4237-a717-72abcc0d31d3\" (UID: \"4532a061-43b9-4237-a717-72abcc0d31d3\") " Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.162368 3552 reconciler_common.go:300] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4532a061-43b9-4237-a717-72abcc0d31d3-host\") on node \"crc\" DevicePath \"\"" Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.167206 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j" (OuterVolumeSpecName: "kube-api-access-9w65j") pod "4532a061-43b9-4237-a717-72abcc0d31d3" (UID: "4532a061-43b9-4237-a717-72abcc0d31d3"). InnerVolumeSpecName "kube-api-access-9w65j". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.263820 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-9w65j\" (UniqueName: \"kubernetes.io/projected/4532a061-43b9-4237-a717-72abcc0d31d3-kube-api-access-9w65j\") on node \"crc\" DevicePath \"\"" Mar 20 16:38:25 crc kubenswrapper[3552]: I0320 16:38:25.442590 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4532a061-43b9-4237-a717-72abcc0d31d3" path="/var/lib/kubelet/pods/4532a061-43b9-4237-a717-72abcc0d31d3/volumes" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.061359 3552 scope.go:117] "RemoveContainer" containerID="bf1257187f17874c456a745118764c3dc6b3fa43d229014ad529193d5f618e04" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.061413 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-ksjbf" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.373254 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-hq9dd"] Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.373800 3552 topology_manager.go:215] "Topology Admit Handler" podUID="4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" podNamespace="openshift-must-gather-vzh4b" podName="crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: E0320 16:38:26.374154 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4532a061-43b9-4237-a717-72abcc0d31d3" containerName="container-00" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.374169 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4532a061-43b9-4237-a717-72abcc0d31d3" containerName="container-00" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.374522 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4532a061-43b9-4237-a717-72abcc0d31d3" containerName="container-00" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.375374 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.378607 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-must-gather-vzh4b"/"default-dockercfg-jm6ht" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.487533 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlbgf\" (UniqueName: \"kubernetes.io/projected/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-kube-api-access-wlbgf\") pod \"crc-debug-hq9dd\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.487624 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-host\") pod \"crc-debug-hq9dd\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.589000 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-host\") pod \"crc-debug-hq9dd\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.589178 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-wlbgf\" (UniqueName: \"kubernetes.io/projected/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-kube-api-access-wlbgf\") pod \"crc-debug-hq9dd\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.589206 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-host\") pod \"crc-debug-hq9dd\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.609225 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlbgf\" (UniqueName: 
\"kubernetes.io/projected/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-kube-api-access-wlbgf\") pod \"crc-debug-hq9dd\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: I0320 16:38:26.731368 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:26 crc kubenswrapper[3552]: W0320 16:38:26.760078 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ac974fa_fbbd_4758_bbd2_b50047cd4fa8.slice/crio-bf47f2a4838dd7f4c40e8c0df43d5cecb175f040287d51fb3523115d03ad25a3 WatchSource:0}: Error finding container bf47f2a4838dd7f4c40e8c0df43d5cecb175f040287d51fb3523115d03ad25a3: Status 404 returned error can't find the container with id bf47f2a4838dd7f4c40e8c0df43d5cecb175f040287d51fb3523115d03ad25a3 Mar 20 16:38:27 crc kubenswrapper[3552]: I0320 16:38:27.082137 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" event={"ID":"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8","Type":"ContainerStarted","Data":"bf47f2a4838dd7f4c40e8c0df43d5cecb175f040287d51fb3523115d03ad25a3"} Mar 20 16:38:28 crc kubenswrapper[3552]: I0320 16:38:28.090293 3552 generic.go:334] "Generic (PLEG): container finished" podID="4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" containerID="453b4440445642387fa7e79367aae19e1c92bccb0e2b614f1f7c1bcdbe47dd5d" exitCode=0 Mar 20 16:38:28 crc kubenswrapper[3552]: I0320 16:38:28.090426 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" event={"ID":"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8","Type":"ContainerDied","Data":"453b4440445642387fa7e79367aae19e1c92bccb0e2b614f1f7c1bcdbe47dd5d"} Mar 20 16:38:28 crc kubenswrapper[3552]: I0320 16:38:28.426221 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-hq9dd"] Mar 20 16:38:28 crc kubenswrapper[3552]: I0320 16:38:28.434697 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-hq9dd"] Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.219655 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.342965 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlbgf\" (UniqueName: \"kubernetes.io/projected/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-kube-api-access-wlbgf\") pod \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.343225 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-host\") pod \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\" (UID: \"4ac974fa-fbbd-4758-bbd2-b50047cd4fa8\") " Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.343621 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-host" (OuterVolumeSpecName: "host") pod "4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" (UID: "4ac974fa-fbbd-4758-bbd2-b50047cd4fa8"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.349411 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-kube-api-access-wlbgf" (OuterVolumeSpecName: "kube-api-access-wlbgf") pod "4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" (UID: "4ac974fa-fbbd-4758-bbd2-b50047cd4fa8"). InnerVolumeSpecName "kube-api-access-wlbgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.441114 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" path="/var/lib/kubelet/pods/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8/volumes" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.445537 3552 reconciler_common.go:300] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-host\") on node \"crc\" DevicePath \"\"" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.445570 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-wlbgf\" (UniqueName: \"kubernetes.io/projected/4ac974fa-fbbd-4758-bbd2-b50047cd4fa8-kube-api-access-wlbgf\") on node \"crc\" DevicePath \"\"" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.697549 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-q5xxv"] Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.697781 3552 topology_manager.go:215] "Topology Admit Handler" podUID="ef5d180a-869d-4f11-bcd1-48861c44cf36" podNamespace="openshift-must-gather-vzh4b" podName="crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: E0320 16:38:29.698098 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" containerName="container-00" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.698117 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" containerName="container-00" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.698366 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ac974fa-fbbd-4758-bbd2-b50047cd4fa8" containerName="container-00" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.699195 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.853586 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef5d180a-869d-4f11-bcd1-48861c44cf36-host\") pod \"crc-debug-q5xxv\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.853657 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92sts\" (UniqueName: \"kubernetes.io/projected/ef5d180a-869d-4f11-bcd1-48861c44cf36-kube-api-access-92sts\") pod \"crc-debug-q5xxv\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.955516 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef5d180a-869d-4f11-bcd1-48861c44cf36-host\") pod \"crc-debug-q5xxv\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.955893 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-92sts\" (UniqueName: \"kubernetes.io/projected/ef5d180a-869d-4f11-bcd1-48861c44cf36-kube-api-access-92sts\") pod \"crc-debug-q5xxv\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.955812 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef5d180a-869d-4f11-bcd1-48861c44cf36-host\") pod \"crc-debug-q5xxv\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:29 crc kubenswrapper[3552]: I0320 16:38:29.974936 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-92sts\" (UniqueName: \"kubernetes.io/projected/ef5d180a-869d-4f11-bcd1-48861c44cf36-kube-api-access-92sts\") pod \"crc-debug-q5xxv\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:30 crc kubenswrapper[3552]: I0320 16:38:30.025167 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:30 crc kubenswrapper[3552]: W0320 16:38:30.078212 3552 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef5d180a_869d_4f11_bcd1_48861c44cf36.slice/crio-2647f8552eebbc3524adc87c1f3e9e70e8bce806cbd2196068b6105b25ad9757 WatchSource:0}: Error finding container 2647f8552eebbc3524adc87c1f3e9e70e8bce806cbd2196068b6105b25ad9757: Status 404 returned error can't find the container with id 2647f8552eebbc3524adc87c1f3e9e70e8bce806cbd2196068b6105b25ad9757 Mar 20 16:38:30 crc kubenswrapper[3552]: I0320 16:38:30.106150 3552 scope.go:117] "RemoveContainer" containerID="453b4440445642387fa7e79367aae19e1c92bccb0e2b614f1f7c1bcdbe47dd5d" Mar 20 16:38:30 crc kubenswrapper[3552]: I0320 16:38:30.106170 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-hq9dd" Mar 20 16:38:30 crc kubenswrapper[3552]: I0320 16:38:30.107782 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" event={"ID":"ef5d180a-869d-4f11-bcd1-48861c44cf36","Type":"ContainerStarted","Data":"2647f8552eebbc3524adc87c1f3e9e70e8bce806cbd2196068b6105b25ad9757"} Mar 20 16:38:31 crc kubenswrapper[3552]: I0320 16:38:31.120512 3552 generic.go:334] "Generic (PLEG): container finished" podID="ef5d180a-869d-4f11-bcd1-48861c44cf36" containerID="a18dc6326f8c400fc5482b967f6ac3151036984ff3b42def25c73a16e3b0149a" exitCode=0 Mar 20 16:38:31 crc kubenswrapper[3552]: I0320 16:38:31.120707 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" event={"ID":"ef5d180a-869d-4f11-bcd1-48861c44cf36","Type":"ContainerDied","Data":"a18dc6326f8c400fc5482b967f6ac3151036984ff3b42def25c73a16e3b0149a"} Mar 20 16:38:31 crc kubenswrapper[3552]: I0320 16:38:31.167365 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-q5xxv"] Mar 20 16:38:31 crc kubenswrapper[3552]: I0320 16:38:31.185850 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vzh4b/crc-debug-q5xxv"] Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.264643 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.397096 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92sts\" (UniqueName: \"kubernetes.io/projected/ef5d180a-869d-4f11-bcd1-48861c44cf36-kube-api-access-92sts\") pod \"ef5d180a-869d-4f11-bcd1-48861c44cf36\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.397210 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef5d180a-869d-4f11-bcd1-48861c44cf36-host\") pod \"ef5d180a-869d-4f11-bcd1-48861c44cf36\" (UID: \"ef5d180a-869d-4f11-bcd1-48861c44cf36\") " Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.397480 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef5d180a-869d-4f11-bcd1-48861c44cf36-host" (OuterVolumeSpecName: "host") pod "ef5d180a-869d-4f11-bcd1-48861c44cf36" (UID: "ef5d180a-869d-4f11-bcd1-48861c44cf36"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.398126 3552 reconciler_common.go:300] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef5d180a-869d-4f11-bcd1-48861c44cf36-host\") on node \"crc\" DevicePath \"\"" Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.403501 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef5d180a-869d-4f11-bcd1-48861c44cf36-kube-api-access-92sts" (OuterVolumeSpecName: "kube-api-access-92sts") pod "ef5d180a-869d-4f11-bcd1-48861c44cf36" (UID: "ef5d180a-869d-4f11-bcd1-48861c44cf36"). InnerVolumeSpecName "kube-api-access-92sts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:38:32 crc kubenswrapper[3552]: I0320 16:38:32.519731 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-92sts\" (UniqueName: \"kubernetes.io/projected/ef5d180a-869d-4f11-bcd1-48861c44cf36-kube-api-access-92sts\") on node \"crc\" DevicePath \"\"" Mar 20 16:38:33 crc kubenswrapper[3552]: I0320 16:38:33.139755 3552 scope.go:117] "RemoveContainer" containerID="a18dc6326f8c400fc5482b967f6ac3151036984ff3b42def25c73a16e3b0149a" Mar 20 16:38:33 crc kubenswrapper[3552]: I0320 16:38:33.139803 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vzh4b/crc-debug-q5xxv" Mar 20 16:38:33 crc kubenswrapper[3552]: I0320 16:38:33.443840 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef5d180a-869d-4f11-bcd1-48861c44cf36" path="/var/lib/kubelet/pods/ef5d180a-869d-4f11-bcd1-48861c44cf36/volumes" Mar 20 16:39:01 crc kubenswrapper[3552]: I0320 16:39:01.482906 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:39:01 crc kubenswrapper[3552]: I0320 16:39:01.485239 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:39:01 crc kubenswrapper[3552]: I0320 16:39:01.485366 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:39:01 crc kubenswrapper[3552]: I0320 16:39:01.485556 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:39:01 crc kubenswrapper[3552]: I0320 16:39:01.485729 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:39:27 crc kubenswrapper[3552]: I0320 16:39:27.805468 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5675b7c4bb-854mw_798beca9-b89f-42b2-9e24-cf98a854b880/barbican-api/0.log" Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.021723 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5675b7c4bb-854mw_798beca9-b89f-42b2-9e24-cf98a854b880/barbican-api-log/0.log" Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.104368 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-69d7546cf6-hwprx_b5733b56-3f0c-4b1f-9075-22590d21d3b4/barbican-keystone-listener-log/0.log" Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.119186 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-69d7546cf6-hwprx_b5733b56-3f0c-4b1f-9075-22590d21d3b4/barbican-keystone-listener/0.log" Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.274272 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6bf8c5549-pmzmz_6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775/barbican-worker/0.log" Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.298588 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6bf8c5549-pmzmz_6d8b0bf5-dba9-46ad-9ec9-c95f4f5dc775/barbican-worker-log/0.log" Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.564610 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1/ceilometer-central-agent/0.log" Mar 20 
Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.735508 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1/ceilometer-notification-agent/0.log"
Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.825131 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1/sg-core/0.log"
Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.863717 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ab0d7c6e-8376-4e4b-94fe-c2e84a6523b1/proxy-httpd/0.log"
Mar 20 16:39:28 crc kubenswrapper[3552]: I0320 16:39:28.963607 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2a2a764d-837e-455a-9404-b306cc90147d/cinder-api/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.050959 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2a2a764d-837e-455a-9404-b306cc90147d/cinder-api-log/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.134553 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_dd0f7af7-1f3c-4738-900a-d19e917f9a37/cinder-scheduler/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.214502 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_dd0f7af7-1f3c-4738-900a-d19e917f9a37/probe/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.537920 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-wr7jn_7f951458-ca42-4880-b3f2-faefb8cdbb2a/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.675699 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-dz6mb_4da7fc22-4c28-40c6-93f3-6b7e09f855be/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.680602 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-68599879df-x6nqg_95aeed66-e4e9-42ec-8bf3-88b0a6947263/init/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.840471 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-68599879df-x6nqg_95aeed66-e4e9-42ec-8bf3-88b0a6947263/init/0.log"
Mar 20 16:39:29 crc kubenswrapper[3552]: I0320 16:39:29.867763 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-68599879df-x6nqg_95aeed66-e4e9-42ec-8bf3-88b0a6947263/dnsmasq-dns/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.038004 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-kj6jz_ba8b3ba8-fb04-46b1-89af-0e8f6e7c1e89/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.203155 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_21994c9d-6fc9-428c-8242-f05b41d74c68/glance-httpd/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.241146 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_21994c9d-6fc9-428c-8242-f05b41d74c68/glance-log/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.294585 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_39006f12-301b-4c3d-a0bd-8b19313c4843/glance-httpd/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.369586 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_39006f12-301b-4c3d-a0bd-8b19313c4843/glance-log/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.532057 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_horizon-9c7459748-mvczs_e30af56a-3534-429c-bbe2-3014515d530f/horizon/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.817349 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-h8dzm_7b22c393-016f-45a8-b806-d10bb8dc57fe/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:30 crc kubenswrapper[3552]: I0320 16:39:30.932857 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_horizon-9c7459748-mvczs_e30af56a-3534-429c-bbe2-3014515d530f/horizon-log/0.log"
Mar 20 16:39:31 crc kubenswrapper[3552]: I0320 16:39:31.328849 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_keystone-8487bc87b9-4thvb_7be78447-08e2-4e07-8b7b-f5faa5e093eb/keystone-api/0.log"
Mar 20 16:39:31 crc kubenswrapper[3552]: I0320 16:39:31.361395 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29567041-xdqml_176300c2-c75c-415a-8a97-8fc1227581cb/keystone-cron/0.log"
Mar 20 16:39:31 crc kubenswrapper[3552]: I0320 16:39:31.415238 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-77jvc_ae6c8b60-dfd9-432a-9c27-4768c6cbe57d/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:31 crc kubenswrapper[3552]: I0320 16:39:31.578335 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_596194fb-82df-4137-b62a-0f29c83d4978/kube-state-metrics/0.log"
Mar 20 16:39:32 crc kubenswrapper[3552]: I0320 16:39:32.081924 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7558f7559f-rntkm_b9a18de2-c36e-4cfc-af47-d5143257da26/neutron-api/0.log"
Mar 20 16:39:32 crc kubenswrapper[3552]: I0320 16:39:32.173602 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7558f7559f-rntkm_b9a18de2-c36e-4cfc-af47-d5143257da26/neutron-httpd/0.log"
Mar 20 16:39:32 crc kubenswrapper[3552]: I0320 16:39:32.434345 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-tv78c_7ad3b2bc-4451-4440-b295-0be9dbcc2892/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:32 crc kubenswrapper[3552]: I0320 16:39:32.579864 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-sgj26_763b4ba4-050a-48e5-a22e-eb51ceaec61b/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Mar 20 16:39:32 crc kubenswrapper[3552]: I0320 16:39:32.974614 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_51c26f37-ba5c-4900-832b-1fdf603b5b62/nova-cell0-conductor-conductor/0.log"
path="/var/log/pods/openstack_nova-cell0-conductor-0_51c26f37-ba5c-4900-832b-1fdf603b5b62/nova-cell0-conductor-conductor/0.log" Mar 20 16:39:33 crc kubenswrapper[3552]: I0320 16:39:33.074627 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_82d5b176-ae10-446e-8994-39e52dd4611d/nova-api-log/0.log" Mar 20 16:39:33 crc kubenswrapper[3552]: I0320 16:39:33.262212 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_6dedb4ba-7f06-4600-bfc8-f9e8596fc013/nova-cell1-conductor-conductor/0.log" Mar 20 16:39:33 crc kubenswrapper[3552]: I0320 16:39:33.384316 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_cf9d9b6c-39f4-430a-a8ef-9b632d0bf96c/nova-cell1-novncproxy-novncproxy/0.log" Mar 20 16:39:33 crc kubenswrapper[3552]: I0320 16:39:33.438741 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_82d5b176-ae10-446e-8994-39e52dd4611d/nova-api-api/0.log" Mar 20 16:39:33 crc kubenswrapper[3552]: I0320 16:39:33.773554 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d3c60297-64e7-4110-a1ec-410b35338011/nova-metadata-log/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.226984 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_6d77cf2f-1599-4fe2-84fa-925dec6a7e26/nova-scheduler-scheduler/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.242810 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d3c60297-64e7-4110-a1ec-410b35338011/nova-metadata-metadata/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.430740 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_887f0b69-2c6b-44fa-b0d4-0af7b2e89654/mysql-bootstrap/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.589846 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_887f0b69-2c6b-44fa-b0d4-0af7b2e89654/mysql-bootstrap/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.608811 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_887f0b69-2c6b-44fa-b0d4-0af7b2e89654/galera/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.627910 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-mhhbp_7e975c55-e5ef-4c8c-b6dc-1af5da847c65/nova-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:34 crc kubenswrapper[3552]: I0320 16:39:34.785772 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b5c00f51-e5f5-4edb-8998-e4463051ecac/mysql-bootstrap/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.022373 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b5c00f51-e5f5-4edb-8998-e4463051ecac/galera/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.047375 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_1137d7d3-a9ff-4002-9b7f-174802428ba7/openstackclient/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.083580 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b5c00f51-e5f5-4edb-8998-e4463051ecac/mysql-bootstrap/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.356765 3552 
logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-ztxhh_9d5405cc-3ba4-46fd-b566-be99e325e65c/openstack-network-exporter/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.402193 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-c89mf_854348bd-6351-4ba6-82c7-664311074caf/ovn-controller/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.576589 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qrf8z_af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0/ovsdb-server-init/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.799757 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qrf8z_af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0/ovsdb-server/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.802468 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qrf8z_af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0/ovs-vswitchd/0.log" Mar 20 16:39:35 crc kubenswrapper[3552]: I0320 16:39:35.837048 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qrf8z_af01ed61-3eb6-4ca7-b4c4-ca71ab654dd0/ovsdb-server-init/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.061347 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_74241f0f-72bf-49b6-b849-11361c3b86e5/openstack-network-exporter/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.147966 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_74241f0f-72bf-49b6-b849-11361c3b86e5/ovn-northd/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.266561 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-d2md6_149bba52-4065-4344-845f-1fb933e5833c/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.286878 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a03ab7e3-7def-4bd8-9ec5-93e9b3098b08/openstack-network-exporter/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.355586 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a03ab7e3-7def-4bd8-9ec5-93e9b3098b08/ovsdbserver-nb/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.523464 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e4d74fff-0397-4b4c-ac20-6fd72086c84a/openstack-network-exporter/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.590553 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e4d74fff-0397-4b4c-ac20-6fd72086c84a/ovsdbserver-sb/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.826092 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5aa16a4d-0492-4319-ae5d-87e5a20bec39/init-config-reloader/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.858305 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c5fc7598d-gk2l6_5410b014-55f4-4359-98fd-a7e4bca67721/placement-log/0.log" Mar 20 16:39:36 crc kubenswrapper[3552]: I0320 16:39:36.866213 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c5fc7598d-gk2l6_5410b014-55f4-4359-98fd-a7e4bca67721/placement-api/0.log" Mar 20 
16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.062560 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5aa16a4d-0492-4319-ae5d-87e5a20bec39/prometheus/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.077520 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5aa16a4d-0492-4319-ae5d-87e5a20bec39/config-reloader/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.133979 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5aa16a4d-0492-4319-ae5d-87e5a20bec39/init-config-reloader/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.134693 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5aa16a4d-0492-4319-ae5d-87e5a20bec39/thanos-sidecar/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.384561 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f71c29cd-5055-41bb-b3f8-6183a9be2b7f/setup-container/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.534863 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f71c29cd-5055-41bb-b3f8-6183a9be2b7f/setup-container/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.540139 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f71c29cd-5055-41bb-b3f8-6183a9be2b7f/rabbitmq/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.601495 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58197235-68b0-45dd-9df3-6825c76c4df8/setup-container/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.881210 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58197235-68b0-45dd-9df3-6825c76c4df8/setup-container/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.891261 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58197235-68b0-45dd-9df3-6825c76c4df8/rabbitmq/0.log" Mar 20 16:39:37 crc kubenswrapper[3552]: I0320 16:39:37.970241 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-h4s2f_0a5c936b-91f0-4c32-9f10-f232a07072f0/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:38 crc kubenswrapper[3552]: I0320 16:39:38.154336 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-rdsr2_0b3fef6a-f2e0-4b73-8254-f79a9dd63846/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:38 crc kubenswrapper[3552]: I0320 16:39:38.436739 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-rzlgk_fd195b5e-d78c-40c1-8e30-e9f2464b2eb2/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:38 crc kubenswrapper[3552]: I0320 16:39:38.632000 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-dmpxq_c0a9da87-25f8-42b8-94cd-9c25f87989d3/ssh-known-hosts-edpm-deployment/0.log" Mar 20 16:39:38 crc kubenswrapper[3552]: I0320 16:39:38.643564 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-dt75l_b2f7ac87-e7ee-475a-8aea-91d2123e861d/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:38 crc kubenswrapper[3552]: I0320 16:39:38.920024 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-746656bf7-ktbs6_444a7139-f192-4a10-8047-c84d83d05dab/proxy-server/0.log" Mar 20 16:39:38 crc kubenswrapper[3552]: I0320 16:39:38.978808 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-746656bf7-ktbs6_444a7139-f192-4a10-8047-c84d83d05dab/proxy-httpd/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.099429 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-q5q9g_7961927b-2515-44c1-b350-16985a6c6c73/swift-ring-rebalance/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.259331 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/account-reaper/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.277773 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/account-auditor/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.322537 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/account-replicator/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.374307 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/account-server/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.567954 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/container-auditor/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.568319 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/container-server/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.598471 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/container-updater/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.625961 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/container-replicator/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.753606 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/object-expirer/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.846422 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/object-auditor/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.856827 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/object-server/0.log" Mar 20 16:39:39 crc kubenswrapper[3552]: I0320 16:39:39.902016 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/object-replicator/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.015224 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/object-updater/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.071564 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/rsync/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.159723 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dd24d70f-864e-4803-8e8c-9d9e5aadfa84/swift-recon-cron/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.539660 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_995e0401-20ca-46f3-82bd-3b088d9413a4/test-operator-logs-container/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.751483 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-sfhm6_6f4150fa-2847-4186-91f2-144e7e53ffae/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.760216 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_bea2de7e-3b1c-4ab2-a39f-5f9980e81f92/tempest-tests-tempest-tests-runner/0.log" Mar 20 16:39:40 crc kubenswrapper[3552]: I0320 16:39:40.880617 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-jlsb9_6cfc2f22-9946-4e63-9255-8e174a5bcb2f/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Mar 20 16:39:41 crc kubenswrapper[3552]: I0320 16:39:41.383178 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_cef2d399-77a5-4f2f-ac1d-801e04745c2e/watcher-applier/0.log" Mar 20 16:39:41 crc kubenswrapper[3552]: I0320 16:39:41.482123 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_5cbde9b3-0a58-4706-bb8c-5a8694ddb51c/watcher-api-log/0.log" Mar 20 16:39:42 crc kubenswrapper[3552]: I0320 16:39:42.214566 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_598e089a-dfab-47cc-89bc-f70192f43beb/watcher-decision-engine/0.log" Mar 20 16:39:43 crc kubenswrapper[3552]: I0320 16:39:43.496314 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_5cbde9b3-0a58-4706-bb8c-5a8694ddb51c/watcher-api/0.log" Mar 20 16:39:55 crc kubenswrapper[3552]: I0320 16:39:55.247253 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_762b23fe-ac75-4df1-b0b7-441f4720c635/memcached/0.log" Mar 20 16:40:01 crc kubenswrapper[3552]: I0320 16:40:01.487040 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:40:01 crc kubenswrapper[3552]: I0320 16:40:01.488000 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:40:01 crc kubenswrapper[3552]: I0320 16:40:01.488064 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:40:01 crc kubenswrapper[3552]: I0320 16:40:01.488118 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:40:01 crc kubenswrapper[3552]: I0320 16:40:01.488216 3552 kubelet_getters.go:187] "Pod status 
updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:40:10 crc kubenswrapper[3552]: I0320 16:40:10.872754 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/util/0.log" Mar 20 16:40:10 crc kubenswrapper[3552]: I0320 16:40:10.995121 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/util/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.050261 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/pull/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.089061 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/pull/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.228413 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/util/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.247115 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/pull/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.275554 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_7a5d6df43e82bcd9f777035cdd0143ddec736ae8e19d633ac86bc7d1d14jfg8_af734422-b4a5-4d64-8057-fef16a296420/extract/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.512801 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-85887bf46b-ddjrz_cba9b971-9846-434b-be0f-c49675291fc6/manager/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.658177 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-674dd8bdd9-pvjxb_4ecd96e3-ccef-44d8-b099-803a81f0d35d/manager/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.840777 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-f8f7f758d-r5lmz_354e53f6-1fa0-4b98-8a19-741caf032b5a/manager/0.log" Mar 20 16:40:11 crc kubenswrapper[3552]: I0320 16:40:11.947644 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5dfddf8d94-vwkxd_105486bf-4db8-4e8b-b7e6-b36f272e8042/manager/0.log" Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.221975 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6f77bd5775-9s4lh_ce6883bb-9cc0-494d-acc0-28ab4cb8ba66/manager/0.log" Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.500756 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-dcb9f85b6-xjgbk_4e980880-d1a1-40dc-9a05-394a72cfb983/manager/0.log" Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.700720 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_infra-operator-controller-manager-b68b4cfdf-h8ljt_85fc9bb8-3e61-4a3f-bc9a-54c327b0b278/manager/0.log" Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.782934 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.783015 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.865815 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-854bd9766b-t7rzd_c64483e3-8ac2-4242-9a5e-85839f40cb42/manager/0.log" Mar 20 16:40:12 crc kubenswrapper[3552]: I0320 16:40:12.929677 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-75d675bfc4-mlclf_951c8176-4fe8-41d9-8bea-c6bf299dec7c/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.178045 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6ff8887465-j5znl_f5ec1a5f-b0ee-4476-9347-d5e3244b1ea1/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.410323 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-c9c4d5ccf-zqzbw_a9eee008-53da-425f-bb6e-7a39e6b07754/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.481525 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5666895465-zvh67_4ad6253a-a3ac-4428-a1e0-4059ca5b02f5/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.579450 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5475cc9b67-clc46_e288b597-0ccf-4abc-9f7a-634d63242553/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.678795 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7598758bfb-fd8f7_b808d486-c6e9-4167-92f4-7c854ead72f7/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.768886 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6557559db7m9cdd_6620147f-34ac-4892-9c3c-7886a2bd6558/manager/0.log" Mar 20 16:40:13 crc kubenswrapper[3552]: I0320 16:40:13.933949 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-646b9cdcdc-9z4d6_9d6f615c-4f4b-4ee3-9127-a5b7ce55c5dd/operator/0.log" Mar 20 16:40:14 crc kubenswrapper[3552]: I0320 16:40:14.192722 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-c64kg_13bbb361-b8be-4148-b9a0-e1fe341db57b/registry-server/0.log" Mar 20 16:40:14 crc kubenswrapper[3552]: I0320 16:40:14.420122 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6dd5cfd685-sv9dk_b1a25fa6-822d-4376-8721-bd7802437838/manager/0.log" Mar 20 16:40:14 crc kubenswrapper[3552]: I0320 16:40:14.567624 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-666c4df8d6-k4pgg_71432b9e-c028-469c-904e-9a74b0ffa5ec/manager/0.log" Mar 20 16:40:14 crc kubenswrapper[3552]: I0320 16:40:14.651201 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-7d6d47d9fb-x69tf_b9502382-d33a-43df-bbfa-f4462e6c426a/operator/0.log" Mar 20 16:40:14 crc kubenswrapper[3552]: I0320 16:40:14.918711 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5fccf6868d-8zrxq_9ad61c28-5162-4ceb-b703-79e0f4d20a43/manager/0.log" Mar 20 16:40:15 crc kubenswrapper[3552]: I0320 16:40:15.165236 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-665dbccdfd-t2b8h_42323066-562e-46fc-8616-3d244ae35b2d/manager/0.log" Mar 20 16:40:15 crc kubenswrapper[3552]: I0320 16:40:15.201151 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-597d5dd64-rmm62_145a49ae-4a3b-4096-8fdb-974a93c8194a/manager/0.log" Mar 20 16:40:15 crc kubenswrapper[3552]: I0320 16:40:15.205771 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-c9f654fc4-lxs7j_b672dc41-4a65-47db-aeeb-d9858305445e/manager/0.log" Mar 20 16:40:15 crc kubenswrapper[3552]: I0320 16:40:15.408653 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6dc7cb7d75-5cl45_f4aee2da-fb97-4e64-a4d1-223cf14816e8/manager/0.log" Mar 20 16:40:35 crc kubenswrapper[3552]: I0320 16:40:35.438497 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-649bd778b4-tt5tw_45a8038e-e7f2-4d93-a6f5-7753aa54e63f/control-plane-machine-set-operator/2.log" Mar 20 16:40:35 crc kubenswrapper[3552]: I0320 16:40:35.646695 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-788b7c6b6c-ctdmb_4f8aa612-9da0-4a2b-911e-6a1764a4e74e/kube-rbac-proxy/1.log" Mar 20 16:40:35 crc kubenswrapper[3552]: I0320 16:40:35.731276 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-788b7c6b6c-ctdmb_4f8aa612-9da0-4a2b-911e-6a1764a4e74e/machine-api-operator/1.log" Mar 20 16:40:42 crc kubenswrapper[3552]: I0320 16:40:42.778688 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:40:42 crc kubenswrapper[3552]: I0320 16:40:42.779624 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:40:50 crc kubenswrapper[3552]: I0320 16:40:50.243278 3552 logs.go:325] "Finished parsing log 
file" path="/var/log/pods/cert-manager_cert-manager-758df9885c-2km6z_4581d21f-88be-409f-8105-be49568258e0/cert-manager-controller/0.log" Mar 20 16:40:50 crc kubenswrapper[3552]: I0320 16:40:50.379315 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-676dd9bd64-gk2js_8527c5a5-b8f1-4156-bb87-ebca589bf2ab/cert-manager-cainjector/0.log" Mar 20 16:40:50 crc kubenswrapper[3552]: I0320 16:40:50.441007 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-855f577f79-r4z4f_25fd0780-accd-456f-8df8-206823b15fe7/cert-manager-webhook/0.log" Mar 20 16:41:01 crc kubenswrapper[3552]: I0320 16:41:01.489218 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:41:01 crc kubenswrapper[3552]: I0320 16:41:01.489840 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:41:01 crc kubenswrapper[3552]: I0320 16:41:01.489867 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:41:01 crc kubenswrapper[3552]: I0320 16:41:01.489908 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:41:01 crc kubenswrapper[3552]: I0320 16:41:01.489953 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:41:04 crc kubenswrapper[3552]: I0320 16:41:04.090430 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-78d6dd6fc5-w48cn_5c9f4509-5c04-4b38-a0ac-65d9f19d252c/nmstate-console-plugin/0.log" Mar 20 16:41:04 crc kubenswrapper[3552]: I0320 16:41:04.264599 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-thhvp_97f8069b-8a3d-4ad5-83ba-cfb21db47084/nmstate-handler/0.log" Mar 20 16:41:04 crc kubenswrapper[3552]: I0320 16:41:04.400905 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5bbb58f86c-vm8rb_9ecdf50d-ae92-4621-9c2e-f19b7ed40399/nmstate-operator/0.log" Mar 20 16:41:04 crc kubenswrapper[3552]: I0320 16:41:04.542030 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-857c948b4f-n6m8f_3bc33104-28eb-4add-b717-d172823f68e5/nmstate-webhook/0.log" Mar 20 16:41:12 crc kubenswrapper[3552]: I0320 16:41:12.778813 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:41:12 crc kubenswrapper[3552]: I0320 16:41:12.779485 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:41:12 crc kubenswrapper[3552]: I0320 16:41:12.779540 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 16:41:12 crc kubenswrapper[3552]: I0320 16:41:12.780743 3552 
kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 16:41:12 crc kubenswrapper[3552]: I0320 16:41:12.780966 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" gracePeriod=600 Mar 20 16:41:12 crc kubenswrapper[3552]: E0320 16:41:12.917568 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:41:13 crc kubenswrapper[3552]: I0320 16:41:13.410645 3552 generic.go:334] "Generic (PLEG): container finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" exitCode=0 Mar 20 16:41:13 crc kubenswrapper[3552]: I0320 16:41:13.410675 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6"} Mar 20 16:41:13 crc kubenswrapper[3552]: I0320 16:41:13.410727 3552 scope.go:117] "RemoveContainer" containerID="ffc7492bb2352219f48d04a9ff8a621ca2721375bc277e13930e1e8a6110432d" Mar 20 16:41:13 crc kubenswrapper[3552]: I0320 16:41:13.411571 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:41:13 crc kubenswrapper[3552]: E0320 16:41:13.412196 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.352894 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-glw2k"] Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.355200 3552 topology_manager.go:215] "Topology Admit Handler" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" podNamespace="openshift-marketplace" podName="certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: E0320 16:41:18.355669 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="ef5d180a-869d-4f11-bcd1-48861c44cf36" containerName="container-00" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.355800 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef5d180a-869d-4f11-bcd1-48861c44cf36" containerName="container-00" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.356177 
3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef5d180a-869d-4f11-bcd1-48861c44cf36" containerName="container-00" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.358096 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.374569 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-glw2k"] Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.495204 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-utilities\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.495739 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8qsw\" (UniqueName: \"kubernetes.io/projected/232b6300-f15a-4802-aa2d-f2ac79833069-kube-api-access-z8qsw\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.496054 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-catalog-content\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.598060 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-utilities\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.598439 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-z8qsw\" (UniqueName: \"kubernetes.io/projected/232b6300-f15a-4802-aa2d-f2ac79833069-kube-api-access-z8qsw\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.598609 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-catalog-content\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.598637 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-utilities\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.599295 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-catalog-content\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.623448 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8qsw\" (UniqueName: \"kubernetes.io/projected/232b6300-f15a-4802-aa2d-f2ac79833069-kube-api-access-z8qsw\") pod \"certified-operators-glw2k\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:18 crc kubenswrapper[3552]: I0320 16:41:18.684473 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:19 crc kubenswrapper[3552]: I0320 16:41:19.148757 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-glw2k"] Mar 20 16:41:19 crc kubenswrapper[3552]: I0320 16:41:19.485294 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerStarted","Data":"36154696e1780cd3b79c4baf71ba2620c5443477f8f6b609231da94d55249950"} Mar 20 16:41:20 crc kubenswrapper[3552]: I0320 16:41:20.494024 3552 generic.go:334] "Generic (PLEG): container finished" podID="232b6300-f15a-4802-aa2d-f2ac79833069" containerID="78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164" exitCode=0 Mar 20 16:41:20 crc kubenswrapper[3552]: I0320 16:41:20.494062 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerDied","Data":"78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164"} Mar 20 16:41:20 crc kubenswrapper[3552]: I0320 16:41:20.498347 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 16:41:21 crc kubenswrapper[3552]: I0320 16:41:21.510746 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerStarted","Data":"d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408"} Mar 20 16:41:22 crc kubenswrapper[3552]: I0320 16:41:22.284860 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-658fcfcb8b-54w4m_85ec366f-2967-4402-b803-d1bad5b8dcba/prometheus-operator/0.log" Mar 20 16:41:22 crc kubenswrapper[3552]: I0320 16:41:22.488622 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852_7e93d32b-5615-49fa-a73f-9c81c8cd8cd0/prometheus-operator-admission-webhook/0.log" Mar 20 16:41:22 crc kubenswrapper[3552]: I0320 16:41:22.661874 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j_4f85d41f-8ca4-49e6-88a8-a1aab6f2156f/prometheus-operator-admission-webhook/0.log" Mar 20 16:41:22 crc kubenswrapper[3552]: I0320 16:41:22.723103 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-6dfc56bc4d-27zhf_2831a7cc-6c26-47e1-bd6d-3dfc81f021f9/operator/0.log" Mar 20 16:41:22 crc kubenswrapper[3552]: I0320 16:41:22.959708 3552 logs.go:325] "Finished parsing log 
file" path="/var/log/pods/openshift-operators_perses-operator-7b66ccd595-8l6z8_d7e52e7d-5410-447e-8b22-9d97e1b98f74/perses-operator/0.log" Mar 20 16:41:28 crc kubenswrapper[3552]: I0320 16:41:28.434180 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:41:28 crc kubenswrapper[3552]: E0320 16:41:28.435105 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:41:28 crc kubenswrapper[3552]: I0320 16:41:28.570562 3552 generic.go:334] "Generic (PLEG): container finished" podID="232b6300-f15a-4802-aa2d-f2ac79833069" containerID="d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408" exitCode=0 Mar 20 16:41:28 crc kubenswrapper[3552]: I0320 16:41:28.571043 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerDied","Data":"d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408"} Mar 20 16:41:29 crc kubenswrapper[3552]: I0320 16:41:29.582017 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerStarted","Data":"825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f"} Mar 20 16:41:29 crc kubenswrapper[3552]: I0320 16:41:29.603844 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-glw2k" podStartSLOduration=3.148667786 podStartE2EDuration="11.603797271s" podCreationTimestamp="2026-03-20 16:41:18 +0000 UTC" firstStartedPulling="2026-03-20 16:41:20.4965684 +0000 UTC m=+4580.190265230" lastFinishedPulling="2026-03-20 16:41:28.951697855 +0000 UTC m=+4588.645394715" observedRunningTime="2026-03-20 16:41:29.597134704 +0000 UTC m=+4589.290831544" watchObservedRunningTime="2026-03-20 16:41:29.603797271 +0000 UTC m=+4589.297494101" Mar 20 16:41:38 crc kubenswrapper[3552]: I0320 16:41:38.685354 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:38 crc kubenswrapper[3552]: I0320 16:41:38.686469 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:38 crc kubenswrapper[3552]: I0320 16:41:38.796478 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.401556 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-568fbbdf85-2dltt_5752c9c1-702c-4b5d-9c15-f978fae70a49/controller/0.log" Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.425497 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-568fbbdf85-2dltt_5752c9c1-702c-4b5d-9c15-f978fae70a49/kube-rbac-proxy/0.log" Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.656583 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-597cc54c77-tc5lk_09ba95df-e4cc-4819-8244-426a6a13e8e8/manager/0.log" Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.722214 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8c8685c97-pdplx_7e937e0b-c631-4b12-a33e-d98c139398d5/webhook-server/0.log" Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.767663 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.819824 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-glw2k"] Mar 20 16:41:39 crc kubenswrapper[3552]: I0320 16:41:39.828886 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-frr-files/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.061342 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-frr-files/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.086571 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-reloader/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.092374 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-metrics/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.099037 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-reloader/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.302483 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-reloader/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.318762 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-frr-files/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.326302 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-metrics/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.332396 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-metrics/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.522497 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-frr-files/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.539024 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-reloader/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.571055 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/cp-metrics/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.723841 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/frr-metrics/0.log" Mar 20 16:41:40 
crc kubenswrapper[3552]: I0320 16:41:40.754098 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/kube-rbac-proxy-frr/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.773852 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/kube-rbac-proxy/0.log" Mar 20 16:41:40 crc kubenswrapper[3552]: I0320 16:41:40.926395 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/reloader/0.log" Mar 20 16:41:41 crc kubenswrapper[3552]: I0320 16:41:41.379897 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/speaker/0.log" Mar 20 16:41:41 crc kubenswrapper[3552]: I0320 16:41:41.695522 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-glw2k" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="registry-server" containerID="cri-o://825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f" gracePeriod=2 Mar 20 16:41:41 crc kubenswrapper[3552]: I0320 16:41:41.807504 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-pg572_cdb4ac00-d8b3-4293-ab54-0e8be9a7e8e7/frr/0.log" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.151302 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.255515 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-catalog-content\") pod \"232b6300-f15a-4802-aa2d-f2ac79833069\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.255659 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-utilities\") pod \"232b6300-f15a-4802-aa2d-f2ac79833069\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.255914 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8qsw\" (UniqueName: \"kubernetes.io/projected/232b6300-f15a-4802-aa2d-f2ac79833069-kube-api-access-z8qsw\") pod \"232b6300-f15a-4802-aa2d-f2ac79833069\" (UID: \"232b6300-f15a-4802-aa2d-f2ac79833069\") " Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.256704 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-utilities" (OuterVolumeSpecName: "utilities") pod "232b6300-f15a-4802-aa2d-f2ac79833069" (UID: "232b6300-f15a-4802-aa2d-f2ac79833069"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.266598 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/232b6300-f15a-4802-aa2d-f2ac79833069-kube-api-access-z8qsw" (OuterVolumeSpecName: "kube-api-access-z8qsw") pod "232b6300-f15a-4802-aa2d-f2ac79833069" (UID: "232b6300-f15a-4802-aa2d-f2ac79833069"). InnerVolumeSpecName "kube-api-access-z8qsw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.358243 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-z8qsw\" (UniqueName: \"kubernetes.io/projected/232b6300-f15a-4802-aa2d-f2ac79833069-kube-api-access-z8qsw\") on node \"crc\" DevicePath \"\"" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.358606 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.431032 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:41:42 crc kubenswrapper[3552]: E0320 16:41:42.431680 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.500931 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "232b6300-f15a-4802-aa2d-f2ac79833069" (UID: "232b6300-f15a-4802-aa2d-f2ac79833069"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.562559 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/232b6300-f15a-4802-aa2d-f2ac79833069-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.700004 3552 generic.go:334] "Generic (PLEG): container finished" podID="232b6300-f15a-4802-aa2d-f2ac79833069" containerID="825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f" exitCode=0 Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.700043 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerDied","Data":"825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f"} Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.700065 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-glw2k" event={"ID":"232b6300-f15a-4802-aa2d-f2ac79833069","Type":"ContainerDied","Data":"36154696e1780cd3b79c4baf71ba2620c5443477f8f6b609231da94d55249950"} Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.700082 3552 scope.go:117] "RemoveContainer" containerID="825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.700201 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-glw2k" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.752986 3552 scope.go:117] "RemoveContainer" containerID="d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.781910 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-glw2k"] Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.802139 3552 scope.go:117] "RemoveContainer" containerID="78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.804420 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-glw2k"] Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.831317 3552 scope.go:117] "RemoveContainer" containerID="825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f" Mar 20 16:41:42 crc kubenswrapper[3552]: E0320 16:41:42.831758 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f\": container with ID starting with 825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f not found: ID does not exist" containerID="825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.831798 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f"} err="failed to get container status \"825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f\": rpc error: code = NotFound desc = could not find container \"825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f\": container with ID starting with 825a74e36dd798c7de3d9a146b57113641b81dba508c9e6db06811a7b6cd0e1f not found: ID does not exist" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.831809 3552 scope.go:117] "RemoveContainer" containerID="d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408" Mar 20 16:41:42 crc kubenswrapper[3552]: E0320 16:41:42.832016 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408\": container with ID starting with d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408 not found: ID does not exist" containerID="d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.832041 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408"} err="failed to get container status \"d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408\": rpc error: code = NotFound desc = could not find container \"d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408\": container with ID starting with d2583159b008488e064da4a4410103d4bb6994d5f4a59f9bb83f4aca961c8408 not found: ID does not exist" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.832050 3552 scope.go:117] "RemoveContainer" containerID="78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164" Mar 20 16:41:42 crc kubenswrapper[3552]: E0320 16:41:42.832262 3552 remote_runtime.go:432] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164\": container with ID starting with 78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164 not found: ID does not exist" containerID="78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164" Mar 20 16:41:42 crc kubenswrapper[3552]: I0320 16:41:42.832286 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164"} err="failed to get container status \"78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164\": rpc error: code = NotFound desc = could not find container \"78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164\": container with ID starting with 78b4c7a2c3b31e91323412b7b45b9f54a61742528d68362c98ef131b5a8df164 not found: ID does not exist" Mar 20 16:41:43 crc kubenswrapper[3552]: I0320 16:41:43.443158 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" path="/var/lib/kubelet/pods/232b6300-f15a-4802-aa2d-f2ac79833069/volumes" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.068595 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/util/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.290508 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/util/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.313259 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/pull/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.356089 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/pull/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.431012 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:41:56 crc kubenswrapper[3552]: E0320 16:41:56.431571 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.510548 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/util/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.532559 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/pull/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.545580 3552 
logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_93d662022be5376a0ed3676a120a68427f47e4653a19a985adf9239726qlmlb_23ef2ba0-4158-4d94-8e17-ebb474c6b977/extract/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.686609 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/util/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.861607 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/util/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.890483 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/pull/0.log" Mar 20 16:41:56 crc kubenswrapper[3552]: I0320 16:41:56.964172 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/pull/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.075071 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/util/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.089760 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/pull/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.157699 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_9e91a457e2fd72fb8a0c514f6ac2c6d4a020c5799eb71ae92362bc27b6lghbb_a6e8a3cc-14b9-4494-bb05-d4a3431b22fd/extract/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.262960 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/extract-utilities/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.426287 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/extract-utilities/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.459807 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/extract-content/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.498809 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/extract-content/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.651759 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/extract-content/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.661087 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/extract-utilities/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.674638 3552 
logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dcwv8_95f52fb1-4064-4f8b-8031-fe8351bebc06/registry-server/0.log" Mar 20 16:41:57 crc kubenswrapper[3552]: I0320 16:41:57.828492 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/extract-utilities/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.093899 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/extract-utilities/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.114690 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/extract-content/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.125204 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/extract-content/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.245757 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/extract-utilities/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.283074 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/extract-content/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.337815 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dm8n5_1d0b6cae-c446-4d8c-a8ae-41523c726770/registry-server/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.433076 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/util/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.658995 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/pull/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.659763 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/pull/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.669124 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/util/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.847282 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/util/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.855969 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/extract/0.log" Mar 20 16:41:58 crc kubenswrapper[3552]: I0320 16:41:58.865472 3552 logs.go:325] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_f2b601e89080a03f83d328f2cceb3d0b8fb3591b7594afb1989be33c3epp2zf_de1b0afb-ad33-40f3-a3ed-919158631191/pull/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.078334 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-8b455464d-tb8k4_0836a98f-28f3-4a11-9409-4cb07a04c016/marketplace-operator/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.105908 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/extract-utilities/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.304793 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/extract-utilities/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.316190 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/extract-content/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.330706 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/extract-content/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.515002 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/extract-utilities/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.552556 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/registry-server/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.580982 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bdhkj_70ff60e6-4747-4506-9cf4-913661b8e689/extract-content/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.711250 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/extract-utilities/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.839205 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/extract-content/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.880522 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/extract-content/0.log" Mar 20 16:41:59 crc kubenswrapper[3552]: I0320 16:41:59.889746 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/extract-utilities/0.log" Mar 20 16:42:00 crc kubenswrapper[3552]: I0320 16:42:00.106065 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/extract-content/0.log" Mar 20 16:42:00 crc kubenswrapper[3552]: I0320 16:42:00.108280 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/extract-utilities/0.log" Mar 20 16:42:00 crc kubenswrapper[3552]: I0320 16:42:00.139287 3552 
logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pwxgv_222b106d-1c3f-47e0-bf17-27ff566f7b95/registry-server/0.log" Mar 20 16:42:01 crc kubenswrapper[3552]: I0320 16:42:01.490705 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:42:01 crc kubenswrapper[3552]: I0320 16:42:01.491339 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:42:01 crc kubenswrapper[3552]: I0320 16:42:01.491408 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:42:01 crc kubenswrapper[3552]: I0320 16:42:01.491495 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:42:01 crc kubenswrapper[3552]: I0320 16:42:01.491553 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:42:07 crc kubenswrapper[3552]: I0320 16:42:07.432541 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:42:07 crc kubenswrapper[3552]: E0320 16:42:07.433499 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:42:13 crc kubenswrapper[3552]: I0320 16:42:13.114706 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-75bb9cbdd4-kq852_7e93d32b-5615-49fa-a73f-9c81c8cd8cd0/prometheus-operator-admission-webhook/0.log" Mar 20 16:42:13 crc kubenswrapper[3552]: I0320 16:42:13.120998 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-658fcfcb8b-54w4m_85ec366f-2967-4402-b803-d1bad5b8dcba/prometheus-operator/0.log" Mar 20 16:42:13 crc kubenswrapper[3552]: I0320 16:42:13.138097 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-75bb9cbdd4-lg64j_4f85d41f-8ca4-49e6-88a8-a1aab6f2156f/prometheus-operator-admission-webhook/0.log" Mar 20 16:42:13 crc kubenswrapper[3552]: I0320 16:42:13.318428 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-7b66ccd595-8l6z8_d7e52e7d-5410-447e-8b22-9d97e1b98f74/perses-operator/0.log" Mar 20 16:42:13 crc kubenswrapper[3552]: I0320 16:42:13.338937 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-6dfc56bc4d-27zhf_2831a7cc-6c26-47e1-bd6d-3dfc81f021f9/operator/0.log" Mar 20 16:42:21 crc kubenswrapper[3552]: I0320 16:42:21.439162 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:42:21 crc kubenswrapper[3552]: E0320 16:42:21.440478 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:42:34 crc kubenswrapper[3552]: I0320 16:42:34.431434 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:42:34 crc kubenswrapper[3552]: E0320 16:42:34.435556 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.779451 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pb2lf"] Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.780562 3552 topology_manager.go:215] "Topology Admit Handler" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" podNamespace="openshift-marketplace" podName="redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: E0320 16:42:46.781033 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="extract-utilities" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.781056 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="extract-utilities" Mar 20 16:42:46 crc kubenswrapper[3552]: E0320 16:42:46.781079 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="extract-content" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.781092 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="extract-content" Mar 20 16:42:46 crc kubenswrapper[3552]: E0320 16:42:46.781128 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="registry-server" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.781141 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="registry-server" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.781548 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="232b6300-f15a-4802-aa2d-f2ac79833069" containerName="registry-server" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.784456 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.801767 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pb2lf"] Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.860725 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56zx7\" (UniqueName: \"kubernetes.io/projected/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-kube-api-access-56zx7\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.860967 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-catalog-content\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.861019 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-utilities\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.962498 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-catalog-content\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.962561 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-utilities\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.962605 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-56zx7\" (UniqueName: \"kubernetes.io/projected/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-kube-api-access-56zx7\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.963362 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-catalog-content\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.963613 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-utilities\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:46 crc kubenswrapper[3552]: I0320 16:42:46.981531 3552 operation_generator.go:721] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-56zx7\" (UniqueName: \"kubernetes.io/projected/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-kube-api-access-56zx7\") pod \"redhat-marketplace-pb2lf\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:47 crc kubenswrapper[3552]: I0320 16:42:47.130790 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:42:47 crc kubenswrapper[3552]: I0320 16:42:47.432559 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:42:47 crc kubenswrapper[3552]: E0320 16:42:47.437432 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:42:47 crc kubenswrapper[3552]: I0320 16:42:47.626790 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pb2lf"] Mar 20 16:42:48 crc kubenswrapper[3552]: I0320 16:42:48.283180 3552 generic.go:334] "Generic (PLEG): container finished" podID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerID="96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e" exitCode=0 Mar 20 16:42:48 crc kubenswrapper[3552]: I0320 16:42:48.283537 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerDied","Data":"96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e"} Mar 20 16:42:48 crc kubenswrapper[3552]: I0320 16:42:48.283589 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerStarted","Data":"1272893ddfcb3d438c9011f0fe0e62702bf0150760b885226f26c0ae5e693011"} Mar 20 16:42:50 crc kubenswrapper[3552]: I0320 16:42:50.303739 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerStarted","Data":"babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482"} Mar 20 16:42:56 crc kubenswrapper[3552]: I0320 16:42:56.351485 3552 generic.go:334] "Generic (PLEG): container finished" podID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerID="babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482" exitCode=0 Mar 20 16:42:56 crc kubenswrapper[3552]: I0320 16:42:56.351605 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerDied","Data":"babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482"} Mar 20 16:42:57 crc kubenswrapper[3552]: I0320 16:42:57.363583 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerStarted","Data":"bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5"} Mar 20 16:42:57 crc kubenswrapper[3552]: I0320 16:42:57.392067 3552 pod_startup_latency_tracker.go:102] "Observed 
pod startup duration" pod="openshift-marketplace/redhat-marketplace-pb2lf" podStartSLOduration=3.016776565 podStartE2EDuration="11.392001219s" podCreationTimestamp="2026-03-20 16:42:46 +0000 UTC" firstStartedPulling="2026-03-20 16:42:48.286248918 +0000 UTC m=+4667.979945748" lastFinishedPulling="2026-03-20 16:42:56.661473572 +0000 UTC m=+4676.355170402" observedRunningTime="2026-03-20 16:42:57.380066941 +0000 UTC m=+4677.073763781" watchObservedRunningTime="2026-03-20 16:42:57.392001219 +0000 UTC m=+4677.085698059" Mar 20 16:42:59 crc kubenswrapper[3552]: I0320 16:42:59.431836 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:42:59 crc kubenswrapper[3552]: E0320 16:42:59.432894 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:43:01 crc kubenswrapper[3552]: I0320 16:43:01.492761 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:43:01 crc kubenswrapper[3552]: I0320 16:43:01.495012 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:43:01 crc kubenswrapper[3552]: I0320 16:43:01.495147 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:43:01 crc kubenswrapper[3552]: I0320 16:43:01.495262 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:43:01 crc kubenswrapper[3552]: I0320 16:43:01.495480 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:43:07 crc kubenswrapper[3552]: I0320 16:43:07.133674 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:43:07 crc kubenswrapper[3552]: I0320 16:43:07.134458 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:43:07 crc kubenswrapper[3552]: I0320 16:43:07.257580 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:43:07 crc kubenswrapper[3552]: I0320 16:43:07.639985 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:43:07 crc kubenswrapper[3552]: I0320 16:43:07.689311 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pb2lf"] Mar 20 16:43:09 crc kubenswrapper[3552]: I0320 16:43:09.527003 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pb2lf" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="registry-server" containerID="cri-o://bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5" gracePeriod=2 Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.014027 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.160376 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56zx7\" (UniqueName: \"kubernetes.io/projected/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-kube-api-access-56zx7\") pod \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.160781 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-utilities\") pod \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.161059 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-catalog-content\") pod \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\" (UID: \"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538\") " Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.161210 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-utilities" (OuterVolumeSpecName: "utilities") pod "b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" (UID: "b15ad3e2-e1c1-4dea-9ba2-a9ed59574538"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.161561 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.167039 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-kube-api-access-56zx7" (OuterVolumeSpecName: "kube-api-access-56zx7") pod "b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" (UID: "b15ad3e2-e1c1-4dea-9ba2-a9ed59574538"). InnerVolumeSpecName "kube-api-access-56zx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.264626 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-56zx7\" (UniqueName: \"kubernetes.io/projected/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-kube-api-access-56zx7\") on node \"crc\" DevicePath \"\"" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.303629 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" (UID: "b15ad3e2-e1c1-4dea-9ba2-a9ed59574538"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.367498 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.535171 3552 generic.go:334] "Generic (PLEG): container finished" podID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerID="bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5" exitCode=0 Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.535220 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerDied","Data":"bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5"} Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.535240 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pb2lf" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.535257 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pb2lf" event={"ID":"b15ad3e2-e1c1-4dea-9ba2-a9ed59574538","Type":"ContainerDied","Data":"1272893ddfcb3d438c9011f0fe0e62702bf0150760b885226f26c0ae5e693011"} Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.535281 3552 scope.go:117] "RemoveContainer" containerID="bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.589152 3552 scope.go:117] "RemoveContainer" containerID="babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.610237 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pb2lf"] Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.623496 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pb2lf"] Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.627993 3552 scope.go:117] "RemoveContainer" containerID="96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.705316 3552 scope.go:117] "RemoveContainer" containerID="bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5" Mar 20 16:43:10 crc kubenswrapper[3552]: E0320 16:43:10.705877 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5\": container with ID starting with bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5 not found: ID does not exist" containerID="bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.705922 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5"} err="failed to get container status \"bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5\": rpc error: code = NotFound desc = could not find container \"bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5\": container with ID starting with bef7d0e892a7e4a6adf3d7448e46998097aa21ba6c35415eb5a274ef2455e9f5 not found: ID does not exist" Mar 
20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.705933 3552 scope.go:117] "RemoveContainer" containerID="babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482" Mar 20 16:43:10 crc kubenswrapper[3552]: E0320 16:43:10.706284 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482\": container with ID starting with babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482 not found: ID does not exist" containerID="babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.706307 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482"} err="failed to get container status \"babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482\": rpc error: code = NotFound desc = could not find container \"babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482\": container with ID starting with babad55d67639ba31ffc3e83fa728def5d8dd0f22ee303636a5804f20c1ec482 not found: ID does not exist" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.706315 3552 scope.go:117] "RemoveContainer" containerID="96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e" Mar 20 16:43:10 crc kubenswrapper[3552]: E0320 16:43:10.706558 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e\": container with ID starting with 96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e not found: ID does not exist" containerID="96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e" Mar 20 16:43:10 crc kubenswrapper[3552]: I0320 16:43:10.706574 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e"} err="failed to get container status \"96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e\": rpc error: code = NotFound desc = could not find container \"96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e\": container with ID starting with 96fdb786b19e0cb3c4e2ba39105ec81f4467f54fdcda7bd2753e786b36fc766e not found: ID does not exist" Mar 20 16:43:11 crc kubenswrapper[3552]: I0320 16:43:11.435775 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:43:11 crc kubenswrapper[3552]: E0320 16:43:11.436797 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:43:11 crc kubenswrapper[3552]: I0320 16:43:11.450020 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" path="/var/lib/kubelet/pods/b15ad3e2-e1c1-4dea-9ba2-a9ed59574538/volumes" Mar 20 16:43:25 crc kubenswrapper[3552]: I0320 16:43:25.431064 3552 scope.go:117] "RemoveContainer" 
containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:43:25 crc kubenswrapper[3552]: E0320 16:43:25.432204 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.133842 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9nqnc"] Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.134562 3552 topology_manager.go:215] "Topology Admit Handler" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" podNamespace="openshift-marketplace" podName="community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: E0320 16:43:30.134880 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="registry-server" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.134894 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="registry-server" Mar 20 16:43:30 crc kubenswrapper[3552]: E0320 16:43:30.134912 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="extract-content" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.134921 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="extract-content" Mar 20 16:43:30 crc kubenswrapper[3552]: E0320 16:43:30.134944 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="extract-utilities" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.134953 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="extract-utilities" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.135212 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="b15ad3e2-e1c1-4dea-9ba2-a9ed59574538" containerName="registry-server" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.138482 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.150809 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9nqnc"] Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.284365 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b76gg\" (UniqueName: \"kubernetes.io/projected/c788ce87-c573-4498-8c08-0c64ee28638e-kube-api-access-b76gg\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.284550 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-catalog-content\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.285044 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-utilities\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.386851 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-utilities\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.387013 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-b76gg\" (UniqueName: \"kubernetes.io/projected/c788ce87-c573-4498-8c08-0c64ee28638e-kube-api-access-b76gg\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.387053 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-catalog-content\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.387565 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-utilities\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.387672 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-catalog-content\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.416052 3552 operation_generator.go:721] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-b76gg\" (UniqueName: \"kubernetes.io/projected/c788ce87-c573-4498-8c08-0c64ee28638e-kube-api-access-b76gg\") pod \"community-operators-9nqnc\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.461614 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:30 crc kubenswrapper[3552]: I0320 16:43:30.929185 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9nqnc"] Mar 20 16:43:31 crc kubenswrapper[3552]: I0320 16:43:31.731876 3552 generic.go:334] "Generic (PLEG): container finished" podID="c788ce87-c573-4498-8c08-0c64ee28638e" containerID="60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34" exitCode=0 Mar 20 16:43:31 crc kubenswrapper[3552]: I0320 16:43:31.731926 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerDied","Data":"60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34"} Mar 20 16:43:31 crc kubenswrapper[3552]: I0320 16:43:31.731946 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerStarted","Data":"5cfbc1a19ff530254168b16496429027bd3094be1eb63b5048a5cf3181c35c20"} Mar 20 16:43:32 crc kubenswrapper[3552]: I0320 16:43:32.739354 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerStarted","Data":"9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d"} Mar 20 16:43:36 crc kubenswrapper[3552]: I0320 16:43:36.434253 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:43:36 crc kubenswrapper[3552]: E0320 16:43:36.435538 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:43:45 crc kubenswrapper[3552]: I0320 16:43:45.839210 3552 generic.go:334] "Generic (PLEG): container finished" podID="c788ce87-c573-4498-8c08-0c64ee28638e" containerID="9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d" exitCode=0 Mar 20 16:43:45 crc kubenswrapper[3552]: I0320 16:43:45.839279 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerDied","Data":"9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d"} Mar 20 16:43:47 crc kubenswrapper[3552]: I0320 16:43:47.858607 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerStarted","Data":"98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e"} Mar 20 16:43:50 crc kubenswrapper[3552]: I0320 16:43:50.430699 3552 scope.go:117] 
"RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:43:50 crc kubenswrapper[3552]: E0320 16:43:50.431724 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:43:50 crc kubenswrapper[3552]: I0320 16:43:50.462449 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:50 crc kubenswrapper[3552]: I0320 16:43:50.462491 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:43:51 crc kubenswrapper[3552]: I0320 16:43:51.553098 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-9nqnc" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="registry-server" probeResult="failure" output=< Mar 20 16:43:51 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:43:51 crc kubenswrapper[3552]: > Mar 20 16:44:00 crc kubenswrapper[3552]: I0320 16:44:00.561543 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:44:00 crc kubenswrapper[3552]: I0320 16:44:00.593820 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9nqnc" podStartSLOduration=16.198264263 podStartE2EDuration="30.593764781s" podCreationTimestamp="2026-03-20 16:43:30 +0000 UTC" firstStartedPulling="2026-03-20 16:43:31.73652935 +0000 UTC m=+4711.430226180" lastFinishedPulling="2026-03-20 16:43:46.132029868 +0000 UTC m=+4725.825726698" observedRunningTime="2026-03-20 16:43:47.890898036 +0000 UTC m=+4727.584594876" watchObservedRunningTime="2026-03-20 16:44:00.593764781 +0000 UTC m=+4740.287461611" Mar 20 16:44:00 crc kubenswrapper[3552]: I0320 16:44:00.643989 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:44:00 crc kubenswrapper[3552]: I0320 16:44:00.704358 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9nqnc"] Mar 20 16:44:01 crc kubenswrapper[3552]: I0320 16:44:01.495846 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:44:01 crc kubenswrapper[3552]: I0320 16:44:01.496445 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:44:01 crc kubenswrapper[3552]: I0320 16:44:01.496517 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:44:01 crc kubenswrapper[3552]: I0320 16:44:01.496621 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:44:01 crc kubenswrapper[3552]: I0320 16:44:01.496653 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 
16:44:02.016359 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9nqnc" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="registry-server" containerID="cri-o://98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e" gracePeriod=2 Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.431226 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:44:02 crc kubenswrapper[3552]: E0320 16:44:02.432305 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.443004 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.548771 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b76gg\" (UniqueName: \"kubernetes.io/projected/c788ce87-c573-4498-8c08-0c64ee28638e-kube-api-access-b76gg\") pod \"c788ce87-c573-4498-8c08-0c64ee28638e\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.549001 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-catalog-content\") pod \"c788ce87-c573-4498-8c08-0c64ee28638e\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.549031 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-utilities\") pod \"c788ce87-c573-4498-8c08-0c64ee28638e\" (UID: \"c788ce87-c573-4498-8c08-0c64ee28638e\") " Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.550091 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-utilities" (OuterVolumeSpecName: "utilities") pod "c788ce87-c573-4498-8c08-0c64ee28638e" (UID: "c788ce87-c573-4498-8c08-0c64ee28638e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.554863 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c788ce87-c573-4498-8c08-0c64ee28638e-kube-api-access-b76gg" (OuterVolumeSpecName: "kube-api-access-b76gg") pod "c788ce87-c573-4498-8c08-0c64ee28638e" (UID: "c788ce87-c573-4498-8c08-0c64ee28638e"). InnerVolumeSpecName "kube-api-access-b76gg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.651808 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-b76gg\" (UniqueName: \"kubernetes.io/projected/c788ce87-c573-4498-8c08-0c64ee28638e-kube-api-access-b76gg\") on node \"crc\" DevicePath \"\"" Mar 20 16:44:02 crc kubenswrapper[3552]: I0320 16:44:02.651863 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.026270 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" event={"ID":"92ab524e-d28f-4085-971b-8741e16f9ccf","Type":"ContainerDied","Data":"45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9"} Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.027177 3552 scope.go:117] "RemoveContainer" containerID="45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.027638 3552 generic.go:334] "Generic (PLEG): container finished" podID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerID="45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9" exitCode=0 Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.030800 3552 generic.go:334] "Generic (PLEG): container finished" podID="c788ce87-c573-4498-8c08-0c64ee28638e" containerID="98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e" exitCode=0 Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.030844 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerDied","Data":"98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e"} Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.030866 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9nqnc" event={"ID":"c788ce87-c573-4498-8c08-0c64ee28638e","Type":"ContainerDied","Data":"5cfbc1a19ff530254168b16496429027bd3094be1eb63b5048a5cf3181c35c20"} Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.030881 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9nqnc" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.030886 3552 scope.go:117] "RemoveContainer" containerID="98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.081390 3552 scope.go:117] "RemoveContainer" containerID="9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.147612 3552 scope.go:117] "RemoveContainer" containerID="60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.179232 3552 scope.go:117] "RemoveContainer" containerID="98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e" Mar 20 16:44:03 crc kubenswrapper[3552]: E0320 16:44:03.180048 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e\": container with ID starting with 98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e not found: ID does not exist" containerID="98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.180104 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e"} err="failed to get container status \"98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e\": rpc error: code = NotFound desc = could not find container \"98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e\": container with ID starting with 98786f1dffbd58b8ec27061fdc81bcf5fd9be5ff800bea2da8305aedc2c2751e not found: ID does not exist" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.180118 3552 scope.go:117] "RemoveContainer" containerID="9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d" Mar 20 16:44:03 crc kubenswrapper[3552]: E0320 16:44:03.180859 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d\": container with ID starting with 9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d not found: ID does not exist" containerID="9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.180923 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d"} err="failed to get container status \"9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d\": rpc error: code = NotFound desc = could not find container \"9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d\": container with ID starting with 9769bf26b13e2dffe305119da822840f7bd05c19dcfc31d22938a389a6a6f56d not found: ID does not exist" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.180942 3552 scope.go:117] "RemoveContainer" containerID="60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34" Mar 20 16:44:03 crc kubenswrapper[3552]: E0320 16:44:03.185709 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34\": container with ID starting with 60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34 not found: ID does not exist" containerID="60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.185752 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34"} err="failed to get container status \"60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34\": rpc error: code = NotFound desc = could not find container \"60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34\": container with ID starting with 60ba25d746a77948fc89cb8ad6a67cabbd8f65a963c31986b4a3fbc33c048a34 not found: ID does not exist" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.214495 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c788ce87-c573-4498-8c08-0c64ee28638e" (UID: "c788ce87-c573-4498-8c08-0c64ee28638e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.262696 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c788ce87-c573-4498-8c08-0c64ee28638e-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.363160 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9nqnc"] Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.373152 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9nqnc"] Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.462672 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" path="/var/lib/kubelet/pods/c788ce87-c573-4498-8c08-0c64ee28638e/volumes" Mar 20 16:44:03 crc kubenswrapper[3552]: I0320 16:44:03.590787 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vzh4b_must-gather-ktqhf_92ab524e-d28f-4085-971b-8741e16f9ccf/gather/0.log" Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.139792 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vzh4b/must-gather-ktqhf"] Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.140561 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="copy" containerID="cri-o://303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741" gracePeriod=2 Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.153491 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vzh4b/must-gather-ktqhf"] Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.451830 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vzh4b_must-gather-ktqhf_92ab524e-d28f-4085-971b-8741e16f9ccf/copy/0.log" Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.452452 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.573964 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tszk\" (UniqueName: \"kubernetes.io/projected/92ab524e-d28f-4085-971b-8741e16f9ccf-kube-api-access-9tszk\") pod \"92ab524e-d28f-4085-971b-8741e16f9ccf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.574044 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/92ab524e-d28f-4085-971b-8741e16f9ccf-must-gather-output\") pod \"92ab524e-d28f-4085-971b-8741e16f9ccf\" (UID: \"92ab524e-d28f-4085-971b-8741e16f9ccf\") " Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.579787 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92ab524e-d28f-4085-971b-8741e16f9ccf-kube-api-access-9tszk" (OuterVolumeSpecName: "kube-api-access-9tszk") pod "92ab524e-d28f-4085-971b-8741e16f9ccf" (UID: "92ab524e-d28f-4085-971b-8741e16f9ccf"). InnerVolumeSpecName "kube-api-access-9tszk". PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.677490 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-9tszk\" (UniqueName: \"kubernetes.io/projected/92ab524e-d28f-4085-971b-8741e16f9ccf-kube-api-access-9tszk\") on node \"crc\" DevicePath \"\"" Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.768202 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92ab524e-d28f-4085-971b-8741e16f9ccf-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "92ab524e-d28f-4085-971b-8741e16f9ccf" (UID: "92ab524e-d28f-4085-971b-8741e16f9ccf"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:44:12 crc kubenswrapper[3552]: I0320 16:44:12.779264 3552 reconciler_common.go:300] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/92ab524e-d28f-4085-971b-8741e16f9ccf-must-gather-output\") on node \"crc\" DevicePath \"\"" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.132579 3552 logs.go:325] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vzh4b_must-gather-ktqhf_92ab524e-d28f-4085-971b-8741e16f9ccf/copy/0.log" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.141708 3552 generic.go:334] "Generic (PLEG): container finished" podID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerID="303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741" exitCode=143 Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.141785 3552 scope.go:117] "RemoveContainer" containerID="303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.141859 3552 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vzh4b/must-gather-ktqhf" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.188592 3552 scope.go:117] "RemoveContainer" containerID="45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.295573 3552 scope.go:117] "RemoveContainer" containerID="303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741" Mar 20 16:44:13 crc kubenswrapper[3552]: E0320 16:44:13.296032 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741\": container with ID starting with 303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741 not found: ID does not exist" containerID="303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.296089 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741"} err="failed to get container status \"303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741\": rpc error: code = NotFound desc = could not find container \"303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741\": container with ID starting with 303c6bc09799c7f99b460a9d0f653b8bf2da30712a4b4810ab30fbbec8f76741 not found: ID does not exist" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.296105 3552 scope.go:117] "RemoveContainer" containerID="45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9" Mar 20 16:44:13 crc kubenswrapper[3552]: E0320 16:44:13.296498 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9\": container with ID starting with 45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9 not found: ID does not exist" containerID="45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.296536 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9"} err="failed to get container status \"45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9\": rpc error: code = NotFound desc = could not find container \"45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9\": container with ID starting with 45c1f8f4011c7b712ec95ec7c161070e2e8664e302af8d63e793412927fe02f9 not found: ID does not exist" Mar 20 16:44:13 crc kubenswrapper[3552]: I0320 16:44:13.457938 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" path="/var/lib/kubelet/pods/92ab524e-d28f-4085-971b-8741e16f9ccf/volumes" Mar 20 16:44:16 crc kubenswrapper[3552]: I0320 16:44:16.432206 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:44:16 crc kubenswrapper[3552]: E0320 16:44:16.434080 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:44:29 crc kubenswrapper[3552]: I0320 16:44:29.431073 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:44:29 crc kubenswrapper[3552]: E0320 16:44:29.432282 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:44:41 crc kubenswrapper[3552]: I0320 16:44:41.438305 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:44:41 crc kubenswrapper[3552]: E0320 16:44:41.443170 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:44:54 crc kubenswrapper[3552]: I0320 16:44:54.431108 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:44:54 crc kubenswrapper[3552]: E0320 16:44:54.432456 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.224615 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6"] Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225288 3552 topology_manager.go:215] "Topology Admit Handler" podUID="04169968-fd5f-4bb8-bbf7-d46288d7eabd" podNamespace="openshift-operator-lifecycle-manager" podName="collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: E0320 16:45:00.225629 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="copy" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225647 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="copy" Mar 20 16:45:00 crc kubenswrapper[3552]: E0320 16:45:00.225676 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="gather" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225685 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="gather" Mar 20 16:45:00 crc kubenswrapper[3552]: E0320 16:45:00.225703 3552 cpu_manager.go:396] "RemoveStaleState: 
removing container" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="registry-server" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225709 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="registry-server" Mar 20 16:45:00 crc kubenswrapper[3552]: E0320 16:45:00.225718 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="extract-content" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225724 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="extract-content" Mar 20 16:45:00 crc kubenswrapper[3552]: E0320 16:45:00.225744 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="extract-utilities" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225751 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="extract-utilities" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225974 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="copy" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.225990 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="92ab524e-d28f-4085-971b-8741e16f9ccf" containerName="gather" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.226005 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="c788ce87-c573-4498-8c08-0c64ee28638e" containerName="registry-server" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.226657 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.229241 3552 reflector.go:351] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.229729 3552 reflector.go:351] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-45g9d" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.235001 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6"] Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.321581 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qjk4\" (UniqueName: \"kubernetes.io/projected/04169968-fd5f-4bb8-bbf7-d46288d7eabd-kube-api-access-7qjk4\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.322013 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/04169968-fd5f-4bb8-bbf7-d46288d7eabd-config-volume\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.322197 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/04169968-fd5f-4bb8-bbf7-d46288d7eabd-secret-volume\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.424442 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/04169968-fd5f-4bb8-bbf7-d46288d7eabd-config-volume\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.424514 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/04169968-fd5f-4bb8-bbf7-d46288d7eabd-secret-volume\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.424670 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-7qjk4\" (UniqueName: \"kubernetes.io/projected/04169968-fd5f-4bb8-bbf7-d46288d7eabd-kube-api-access-7qjk4\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.425360 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/04169968-fd5f-4bb8-bbf7-d46288d7eabd-config-volume\") pod 
\"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.437275 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/04169968-fd5f-4bb8-bbf7-d46288d7eabd-secret-volume\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.444611 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qjk4\" (UniqueName: \"kubernetes.io/projected/04169968-fd5f-4bb8-bbf7-d46288d7eabd-kube-api-access-7qjk4\") pod \"collect-profiles-29567085-qj6l6\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:00 crc kubenswrapper[3552]: I0320 16:45:00.553208 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.034361 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6"] Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.497876 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.498342 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.498380 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.498461 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.498532 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.585887 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" event={"ID":"04169968-fd5f-4bb8-bbf7-d46288d7eabd","Type":"ContainerStarted","Data":"eefa009e47dca525a24d2aa8b7e6ed604829549e39426171b57a3c905d52aa12"} Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.585925 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" event={"ID":"04169968-fd5f-4bb8-bbf7-d46288d7eabd","Type":"ContainerStarted","Data":"ba3cd8fccd17a9eb91f4bcefa85f6ce575aebb3a67d3ed30b16f8471f343fe2b"} Mar 20 16:45:01 crc kubenswrapper[3552]: I0320 16:45:01.601577 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" podStartSLOduration=1.60153845 podStartE2EDuration="1.60153845s" podCreationTimestamp="2026-03-20 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-03-20 16:45:01.598116579 +0000 UTC m=+4801.291813429" 
watchObservedRunningTime="2026-03-20 16:45:01.60153845 +0000 UTC m=+4801.295235280" Mar 20 16:45:02 crc kubenswrapper[3552]: I0320 16:45:02.594917 3552 generic.go:334] "Generic (PLEG): container finished" podID="04169968-fd5f-4bb8-bbf7-d46288d7eabd" containerID="eefa009e47dca525a24d2aa8b7e6ed604829549e39426171b57a3c905d52aa12" exitCode=0 Mar 20 16:45:02 crc kubenswrapper[3552]: I0320 16:45:02.595134 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" event={"ID":"04169968-fd5f-4bb8-bbf7-d46288d7eabd","Type":"ContainerDied","Data":"eefa009e47dca525a24d2aa8b7e6ed604829549e39426171b57a3c905d52aa12"} Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.840709 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.906719 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/04169968-fd5f-4bb8-bbf7-d46288d7eabd-secret-volume\") pod \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.906870 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qjk4\" (UniqueName: \"kubernetes.io/projected/04169968-fd5f-4bb8-bbf7-d46288d7eabd-kube-api-access-7qjk4\") pod \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.907253 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/04169968-fd5f-4bb8-bbf7-d46288d7eabd-config-volume\") pod \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\" (UID: \"04169968-fd5f-4bb8-bbf7-d46288d7eabd\") " Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.908155 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04169968-fd5f-4bb8-bbf7-d46288d7eabd-config-volume" (OuterVolumeSpecName: "config-volume") pod "04169968-fd5f-4bb8-bbf7-d46288d7eabd" (UID: "04169968-fd5f-4bb8-bbf7-d46288d7eabd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.912222 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04169968-fd5f-4bb8-bbf7-d46288d7eabd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "04169968-fd5f-4bb8-bbf7-d46288d7eabd" (UID: "04169968-fd5f-4bb8-bbf7-d46288d7eabd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.914856 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04169968-fd5f-4bb8-bbf7-d46288d7eabd-kube-api-access-7qjk4" (OuterVolumeSpecName: "kube-api-access-7qjk4") pod "04169968-fd5f-4bb8-bbf7-d46288d7eabd" (UID: "04169968-fd5f-4bb8-bbf7-d46288d7eabd"). InnerVolumeSpecName "kube-api-access-7qjk4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.927162 3552 reconciler_common.go:300] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/04169968-fd5f-4bb8-bbf7-d46288d7eabd-secret-volume\") on node \"crc\" DevicePath \"\"" Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.927207 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-7qjk4\" (UniqueName: \"kubernetes.io/projected/04169968-fd5f-4bb8-bbf7-d46288d7eabd-kube-api-access-7qjk4\") on node \"crc\" DevicePath \"\"" Mar 20 16:45:03 crc kubenswrapper[3552]: I0320 16:45:03.927221 3552 reconciler_common.go:300] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/04169968-fd5f-4bb8-bbf7-d46288d7eabd-config-volume\") on node \"crc\" DevicePath \"\"" Mar 20 16:45:04 crc kubenswrapper[3552]: I0320 16:45:04.619863 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" event={"ID":"04169968-fd5f-4bb8-bbf7-d46288d7eabd","Type":"ContainerDied","Data":"ba3cd8fccd17a9eb91f4bcefa85f6ce575aebb3a67d3ed30b16f8471f343fe2b"} Mar 20 16:45:04 crc kubenswrapper[3552]: I0320 16:45:04.619941 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29567085-qj6l6" Mar 20 16:45:04 crc kubenswrapper[3552]: I0320 16:45:04.621775 3552 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba3cd8fccd17a9eb91f4bcefa85f6ce575aebb3a67d3ed30b16f8471f343fe2b" Mar 20 16:45:04 crc kubenswrapper[3552]: I0320 16:45:04.697500 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4"] Mar 20 16:45:04 crc kubenswrapper[3552]: I0320 16:45:04.704661 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29567040-27kr4"] Mar 20 16:45:05 crc kubenswrapper[3552]: I0320 16:45:05.466686 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2" path="/var/lib/kubelet/pods/4d5a577d-5e87-40ef-b5c2-a3303ec4a2b2/volumes" Mar 20 16:45:08 crc kubenswrapper[3552]: I0320 16:45:08.431174 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:45:08 crc kubenswrapper[3552]: E0320 16:45:08.433219 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:45:21 crc kubenswrapper[3552]: I0320 16:45:21.438817 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:45:21 crc kubenswrapper[3552]: E0320 16:45:21.440476 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:45:28 crc kubenswrapper[3552]: I0320 16:45:28.081720 3552 scope.go:117] "RemoveContainer" containerID="c857bc69d61890ea5276f45fac8c4684e45fa41a051ffecccfced34dd846e99a" Mar 20 16:45:34 crc kubenswrapper[3552]: I0320 16:45:34.431100 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:45:34 crc kubenswrapper[3552]: E0320 16:45:34.432297 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:45:45 crc kubenswrapper[3552]: I0320 16:45:45.432151 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:45:45 crc kubenswrapper[3552]: E0320 16:45:45.433814 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:45:57 crc kubenswrapper[3552]: I0320 16:45:57.431809 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:45:57 crc kubenswrapper[3552]: E0320 16:45:57.433992 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:46:01 crc kubenswrapper[3552]: I0320 16:46:01.499211 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:46:01 crc kubenswrapper[3552]: I0320 16:46:01.499978 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:46:01 crc kubenswrapper[3552]: I0320 16:46:01.500018 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:46:01 crc kubenswrapper[3552]: I0320 16:46:01.500059 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:46:01 crc kubenswrapper[3552]: I0320 16:46:01.500120 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:46:11 crc kubenswrapper[3552]: I0320 16:46:11.437644 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:46:11 crc kubenswrapper[3552]: E0320 16:46:11.439581 3552 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zpnhg_openshift-machine-config-operator(9d0dcce3-d96e-48cb-9b9f-362105911589)\"" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" Mar 20 16:46:23 crc kubenswrapper[3552]: I0320 16:46:23.434015 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:46:24 crc kubenswrapper[3552]: I0320 16:46:24.459771 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"8712ab98b2464c5a5a4240ecc06f6bba1ae50f8eff72e439fa1c4b87d5c5000c"} Mar 20 16:47:01 crc kubenswrapper[3552]: I0320 16:47:01.500573 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:47:01 crc kubenswrapper[3552]: I0320 16:47:01.501420 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:47:01 crc kubenswrapper[3552]: I0320 16:47:01.501461 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:47:01 crc kubenswrapper[3552]: I0320 16:47:01.501530 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:47:01 crc kubenswrapper[3552]: I0320 16:47:01.501600 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.122938 3552 kubelet.go:2429] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8smf7"] Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.123775 3552 topology_manager.go:215] "Topology Admit Handler" podUID="5a98cf16-3640-4d23-84ec-455302721457" podNamespace="openshift-marketplace" podName="redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: E0320 16:47:50.124160 3552 cpu_manager.go:396] "RemoveStaleState: removing container" podUID="04169968-fd5f-4bb8-bbf7-d46288d7eabd" containerName="collect-profiles" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.124173 3552 state_mem.go:107] "Deleted CPUSet assignment" podUID="04169968-fd5f-4bb8-bbf7-d46288d7eabd" containerName="collect-profiles" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.124396 3552 memory_manager.go:354] "RemoveStaleState removing state" podUID="04169968-fd5f-4bb8-bbf7-d46288d7eabd" containerName="collect-profiles" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.126097 3552 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.151108 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8smf7"] Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.206525 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-catalog-content\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.206581 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqtkw\" (UniqueName: \"kubernetes.io/projected/5a98cf16-3640-4d23-84ec-455302721457-kube-api-access-gqtkw\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.207141 3552 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-utilities\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.309733 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-utilities\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.309832 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-catalog-content\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.309871 3552 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"kube-api-access-gqtkw\" (UniqueName: \"kubernetes.io/projected/5a98cf16-3640-4d23-84ec-455302721457-kube-api-access-gqtkw\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.310238 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-utilities\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.310309 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-catalog-content\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.334567 3552 operation_generator.go:721] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gqtkw\" (UniqueName: \"kubernetes.io/projected/5a98cf16-3640-4d23-84ec-455302721457-kube-api-access-gqtkw\") pod \"redhat-operators-8smf7\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.444363 3552 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:47:50 crc kubenswrapper[3552]: I0320 16:47:50.944140 3552 kubelet.go:2436] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8smf7"] Mar 20 16:47:51 crc kubenswrapper[3552]: I0320 16:47:51.344549 3552 generic.go:334] "Generic (PLEG): container finished" podID="5a98cf16-3640-4d23-84ec-455302721457" containerID="9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699" exitCode=0 Mar 20 16:47:51 crc kubenswrapper[3552]: I0320 16:47:51.344600 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerDied","Data":"9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699"} Mar 20 16:47:51 crc kubenswrapper[3552]: I0320 16:47:51.344867 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerStarted","Data":"6150165f59fd18bd2310758269b29cf82066c15435d004d8afdd78f6c74e1f8c"} Mar 20 16:47:51 crc kubenswrapper[3552]: I0320 16:47:51.346899 3552 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Mar 20 16:47:52 crc kubenswrapper[3552]: I0320 16:47:52.354461 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerStarted","Data":"d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325"} Mar 20 16:48:01 crc kubenswrapper[3552]: I0320 16:48:01.502344 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:48:01 crc kubenswrapper[3552]: I0320 16:48:01.502992 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:48:01 crc kubenswrapper[3552]: I0320 16:48:01.503019 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:48:01 crc kubenswrapper[3552]: I0320 16:48:01.503045 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:48:01 crc kubenswrapper[3552]: I0320 16:48:01.503088 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:48:29 crc kubenswrapper[3552]: I0320 16:48:29.688115 3552 generic.go:334] "Generic (PLEG): container finished" podID="5a98cf16-3640-4d23-84ec-455302721457" containerID="d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325" exitCode=0 Mar 20 16:48:29 crc kubenswrapper[3552]: I0320 16:48:29.689651 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerDied","Data":"d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325"} Mar 20 16:48:30 crc kubenswrapper[3552]: I0320 
16:48:30.700181 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerStarted","Data":"76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2"} Mar 20 16:48:30 crc kubenswrapper[3552]: I0320 16:48:30.726228 3552 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8smf7" podStartSLOduration=2.014873855 podStartE2EDuration="40.72615892s" podCreationTimestamp="2026-03-20 16:47:50 +0000 UTC" firstStartedPulling="2026-03-20 16:47:51.346565769 +0000 UTC m=+4971.040262599" lastFinishedPulling="2026-03-20 16:48:30.057850834 +0000 UTC m=+5009.751547664" observedRunningTime="2026-03-20 16:48:30.722309597 +0000 UTC m=+5010.416006447" watchObservedRunningTime="2026-03-20 16:48:30.72615892 +0000 UTC m=+5010.419855770" Mar 20 16:48:40 crc kubenswrapper[3552]: I0320 16:48:40.445464 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:48:40 crc kubenswrapper[3552]: I0320 16:48:40.446126 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:48:41 crc kubenswrapper[3552]: I0320 16:48:41.563385 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8smf7" podUID="5a98cf16-3640-4d23-84ec-455302721457" containerName="registry-server" probeResult="failure" output=< Mar 20 16:48:41 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:48:41 crc kubenswrapper[3552]: > Mar 20 16:48:42 crc kubenswrapper[3552]: I0320 16:48:42.778443 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:48:42 crc kubenswrapper[3552]: I0320 16:48:42.780363 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:48:51 crc kubenswrapper[3552]: I0320 16:48:51.542241 3552 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8smf7" podUID="5a98cf16-3640-4d23-84ec-455302721457" containerName="registry-server" probeResult="failure" output=< Mar 20 16:48:51 crc kubenswrapper[3552]: timeout: failed to connect service ":50051" within 1s Mar 20 16:48:51 crc kubenswrapper[3552]: > Mar 20 16:49:00 crc kubenswrapper[3552]: I0320 16:49:00.563005 3552 kubelet.go:2533] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:49:00 crc kubenswrapper[3552]: I0320 16:49:00.648649 3552 kubelet.go:2533] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:49:00 crc kubenswrapper[3552]: I0320 16:49:00.696231 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8smf7"] Mar 20 16:49:01 crc kubenswrapper[3552]: I0320 16:49:01.504034 3552 kubelet_getters.go:187] "Pod status updated" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:49:01 crc kubenswrapper[3552]: I0320 16:49:01.504139 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:49:01 crc kubenswrapper[3552]: I0320 16:49:01.504179 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" Mar 20 16:49:01 crc kubenswrapper[3552]: I0320 16:49:01.504251 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:49:01 crc kubenswrapper[3552]: I0320 16:49:01.504332 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:49:02 crc kubenswrapper[3552]: I0320 16:49:02.570661 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8smf7" podUID="5a98cf16-3640-4d23-84ec-455302721457" containerName="registry-server" containerID="cri-o://76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2" gracePeriod=2 Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.256305 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.408708 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-catalog-content\") pod \"5a98cf16-3640-4d23-84ec-455302721457\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.408859 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-utilities\") pod \"5a98cf16-3640-4d23-84ec-455302721457\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.408955 3552 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqtkw\" (UniqueName: \"kubernetes.io/projected/5a98cf16-3640-4d23-84ec-455302721457-kube-api-access-gqtkw\") pod \"5a98cf16-3640-4d23-84ec-455302721457\" (UID: \"5a98cf16-3640-4d23-84ec-455302721457\") " Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.411577 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-utilities" (OuterVolumeSpecName: "utilities") pod "5a98cf16-3640-4d23-84ec-455302721457" (UID: "5a98cf16-3640-4d23-84ec-455302721457"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.431683 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a98cf16-3640-4d23-84ec-455302721457-kube-api-access-gqtkw" (OuterVolumeSpecName: "kube-api-access-gqtkw") pod "5a98cf16-3640-4d23-84ec-455302721457" (UID: "5a98cf16-3640-4d23-84ec-455302721457"). InnerVolumeSpecName "kube-api-access-gqtkw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.512899 3552 reconciler_common.go:300] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-utilities\") on node \"crc\" DevicePath \"\"" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.512935 3552 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-gqtkw\" (UniqueName: \"kubernetes.io/projected/5a98cf16-3640-4d23-84ec-455302721457-kube-api-access-gqtkw\") on node \"crc\" DevicePath \"\"" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.562233 3552 generic.go:334] "Generic (PLEG): container finished" podID="5a98cf16-3640-4d23-84ec-455302721457" containerID="76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2" exitCode=0 Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.562522 3552 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8smf7" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.562460 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerDied","Data":"76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2"} Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.562858 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8smf7" event={"ID":"5a98cf16-3640-4d23-84ec-455302721457","Type":"ContainerDied","Data":"6150165f59fd18bd2310758269b29cf82066c15435d004d8afdd78f6c74e1f8c"} Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.562891 3552 scope.go:117] "RemoveContainer" containerID="76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.607464 3552 scope.go:117] "RemoveContainer" containerID="d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.658622 3552 scope.go:117] "RemoveContainer" containerID="9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.698444 3552 scope.go:117] "RemoveContainer" containerID="76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2" Mar 20 16:49:03 crc kubenswrapper[3552]: E0320 16:49:03.702022 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2\": container with ID starting with 76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2 not found: ID does not exist" containerID="76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.702110 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2"} err="failed to get container status \"76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2\": rpc error: code = NotFound desc = could not find container \"76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2\": container with ID starting with 76dfb12f9b9110227abbc1c30c943881a19b57c5c8504e7918b3c408b4b8aad2 not found: ID does not exist" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.702131 3552 scope.go:117] 
"RemoveContainer" containerID="d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325" Mar 20 16:49:03 crc kubenswrapper[3552]: E0320 16:49:03.702826 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325\": container with ID starting with d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325 not found: ID does not exist" containerID="d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.702863 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325"} err="failed to get container status \"d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325\": rpc error: code = NotFound desc = could not find container \"d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325\": container with ID starting with d61b3e3792aace50564cfde141d9baced1d5fe20165102b7d27c11eeefca3325 not found: ID does not exist" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.702877 3552 scope.go:117] "RemoveContainer" containerID="9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699" Mar 20 16:49:03 crc kubenswrapper[3552]: E0320 16:49:03.703852 3552 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699\": container with ID starting with 9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699 not found: ID does not exist" containerID="9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699" Mar 20 16:49:03 crc kubenswrapper[3552]: I0320 16:49:03.703917 3552 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699"} err="failed to get container status \"9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699\": rpc error: code = NotFound desc = could not find container \"9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699\": container with ID starting with 9de45f94bac11969ca139dd146754616bd7e0ce670f5ead2a1c94aa86f6bc699 not found: ID does not exist" Mar 20 16:49:04 crc kubenswrapper[3552]: I0320 16:49:04.438111 3552 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a98cf16-3640-4d23-84ec-455302721457" (UID: "5a98cf16-3640-4d23-84ec-455302721457"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Mar 20 16:49:04 crc kubenswrapper[3552]: I0320 16:49:04.535628 3552 reconciler_common.go:300] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a98cf16-3640-4d23-84ec-455302721457-catalog-content\") on node \"crc\" DevicePath \"\"" Mar 20 16:49:04 crc kubenswrapper[3552]: I0320 16:49:04.572369 3552 kubelet.go:2445] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8smf7"] Mar 20 16:49:04 crc kubenswrapper[3552]: I0320 16:49:04.581564 3552 kubelet.go:2439] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8smf7"] Mar 20 16:49:05 crc kubenswrapper[3552]: I0320 16:49:05.450322 3552 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a98cf16-3640-4d23-84ec-455302721457" path="/var/lib/kubelet/pods/5a98cf16-3640-4d23-84ec-455302721457/volumes" Mar 20 16:49:12 crc kubenswrapper[3552]: I0320 16:49:12.778925 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:49:12 crc kubenswrapper[3552]: I0320 16:49:12.780561 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.778006 3552 patch_prober.go:28] interesting pod/machine-config-daemon-zpnhg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.778604 3552 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.778646 3552 kubelet.go:2533] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.779618 3552 kuberuntime_manager.go:1029] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8712ab98b2464c5a5a4240ecc06f6bba1ae50f8eff72e439fa1c4b87d5c5000c"} pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.779789 3552 kuberuntime_container.go:770] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" podUID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerName="machine-config-daemon" containerID="cri-o://8712ab98b2464c5a5a4240ecc06f6bba1ae50f8eff72e439fa1c4b87d5c5000c" gracePeriod=600 Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.925934 3552 generic.go:334] "Generic (PLEG): container 
finished" podID="9d0dcce3-d96e-48cb-9b9f-362105911589" containerID="8712ab98b2464c5a5a4240ecc06f6bba1ae50f8eff72e439fa1c4b87d5c5000c" exitCode=0 Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.925987 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerDied","Data":"8712ab98b2464c5a5a4240ecc06f6bba1ae50f8eff72e439fa1c4b87d5c5000c"} Mar 20 16:49:42 crc kubenswrapper[3552]: I0320 16:49:42.926023 3552 scope.go:117] "RemoveContainer" containerID="29fd15324518af021457b8c123b278c0f5ed168e31a2d810914a162b102210a6" Mar 20 16:49:43 crc kubenswrapper[3552]: I0320 16:49:43.935339 3552 kubelet.go:2461] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zpnhg" event={"ID":"9d0dcce3-d96e-48cb-9b9f-362105911589","Type":"ContainerStarted","Data":"05f2c6f02b218ecc2fdc86e5dd175967716c0ade7b22f08cdee53044b464dc70"} Mar 20 16:50:01 crc kubenswrapper[3552]: I0320 16:50:01.504975 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-etcd/etcd-crc" status="Running" Mar 20 16:50:01 crc kubenswrapper[3552]: I0320 16:50:01.505828 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" status="Running" Mar 20 16:50:01 crc kubenswrapper[3552]: I0320 16:50:01.505871 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" status="Running" Mar 20 16:50:01 crc kubenswrapper[3552]: I0320 16:50:01.505919 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-controller-manager/kube-controller-manager-crc" status="Running" Mar 20 16:50:01 crc kubenswrapper[3552]: I0320 16:50:01.505996 3552 kubelet_getters.go:187] "Pod status updated" pod="openshift-kube-apiserver/kube-apiserver-crc" status="Running" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515157275311024454 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015157275312017372 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015157262653016521 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015157262653015471 5ustar corecore

F?Ç;}C~P};r/x#1'4f6:yP8Fy|$I b~ncdŵA5%n28~sp ܶS^Cr{L )-d|V|4܂uOy߾:gȐɣ؇1$y[diA[z;% lp`)\&궼0}:o1iΞy:ka$UPة*`\ջ6_{54ًlϡv(;hm$RT4)5QK\u9H1D룝5SNUsq]Ty>yϻj?k?xϖlCbٰ  [uH gyL0矼l[+"~HscP'XmsH^}8…{몮,~nlEl{C  >fL>otfb*vM7*m>@?+d~SEcAw_{sBTͶOB'-(~uY@~[lx@oߏIn{#dߤ Byag`8K!Ę'{*Ha`h=zbuB`m8K({cyCPuꡓE3epb=#P*E 6-zY#M!!.mEaE*W-6W07CKv0 ViJ HUlbӐ&qkp>o~6fXQc$bՂ~1D(, dҷK$ҍIka_!/D;H}Pw>,lOd!vz\ [d M/gt6V~h*\oK@Uy@h'#BN SLb$dz*h胺+D@I^N ƪ 3:&0l$ aWp[sV4; 㜒2N*/[fτs9 ` !0Vq"cժ5r.z7N ~k,}/*j [vߋNv_klV*UNؠOZAPqQC ,p l$Mh~.vm64"@lID\14-]25!]-#։s)Vт 4(LiޑјtZ̵'doƆUVPgZEt $6j03}Q6`ݳYuª[*Z&%8uk3V!CFV0+dUɻP}Pǜ:avZ/0!Ոgf(`>fjhNReP:ڨ8b[I %?9ұYmF cȗU\YZ&0KK(.xP%IdtŚJ9 c#Y'Deku1[ >\ulo ii}%s׍+ IdU3<մ!N^ز"oM(W_̊E>6Z _ t ܠ(E uRG,ՔEG(!$P{n} zvb/,M*`sgM߷̜6[GD.V]PBo֗΅R fšgq2l*>XtU٘E/F kB.^Rw.(Fodٖ(1oI1Uho] q-h&#[{A!Y^7ڢĔp56= Y| Tg.]mbQԲUU]Q5'/c3Ctz;^g~JXȽ% Ob0dcd=`ìXC}ַpH6(XBf0s2dKƃnsJB,!aYqa%73RQf*Ip]4oJGSzFUe7 wC(۹{8pMhy6XU~p9pWj%9 eJc\3mBJ ߊV>`obcn_=k6]4 F *4}H}ϘFgь-T?F4V2f" /騸Z5cWn aYw,,ڨ=u8Vh cprXpw5Ij6bVxq8Tz v]hEaSX*Tc:&s.+.Qa?"A#p.yAmA/>:ScA^ԃϟ?f7Ya"_ZAOz=2inz&zm?̡.=k7hz,;u+vMQVKiI&y wIdK]vכ% d] ^JHp6c=FmK&˥fG7ںaIF42k+tLsQGoF>DnPK\SnsЁ ›g$s߾Yr%T@9)Nܔhdcxց4zzd֦lL5ZXP9pʺaMg -VyrʙkVMJrbuΙ|ngtHY.Ȑ }bsoH({o/7uZWL2{WKp^6bK 1Yn }\ )YTUtRń!@Y~|H#D!\JɢYXV=m=a۝ÏƓ$;LMDe ުX?|塟뽊]eq׻19TTmn;0;$".8@wi;ܡȷP俞"{5}=\}lq>+Z|cs/WEiv#_is; _ 6<_~[.liLJƹFNX%u2D Ey \Tu|0N}uFr$JjA~M0+3l2~2dքS70xw~3Ζ j9׌$ 6 `SЂϱD&$JF$Kΐ-B͟.yNn#>KZnKޯ+]?#XbAāaA]D(e$ mG:}ϙ\zC˛b@"o\Reݴr(Lpax(BCϥE f r!sycҹOѰf}{arwH?7IсH0rwNafV3vLQQi!'yvUz9EoV4Nf3+W#NkCzumt,#54dOsi&(2LeGUvK/8)M{kuӁw4V&ûtddRg|g@@hn8|*M%6%{"˺L,]BL^%Ut繉sW xŝ/3ƁEb^j,1a<(I;^HzƵO Jhi[j!bS$AHz%b5^GIKF(FSJzQw@yOvV}@sB`$uJIKdNVcubg a@U/@zQ KvJ#6J4'qEy7rfk;벏 lu`+&c( KU}Cd@b@ȰɛqN>+j 8=w^޾p۟^}~6Lɧc*yzUM̕肂Q`5Pd_]oHOep0gel/:yɦFk(X+-Ns0Bl˦(t#G q-1chΔI)X9>{\fp 97frqr ҊJԂ7BO?_?OBӛlշl]яG+\d]? w E~'DbK B ,dѱs8Yh&GVs[o!:w~mtϟ/Hsvvtf~>z } (Ws1/OW3R(ϗ(Ћur٪VN9R. -Ji YFƩ?fs.w  |uྌ* 8} nsYߔqP3%F VtA<0 kIVer κhvqVn[a=ތ㾘e/8,{'wo?;2e4`!F**Zϖ)u#z=w݈nT[RHƄ\Ff4(@6T^((Ak6¤~v">ff}mje|v1PjxDsM)k&N'+zEICnpk9-vl_(غȆ?J+?A_d 煈vuZ\78:K( g;.t~jó >p0?JUzlk?--Mu-oD!()-ҪQI@⥸$Ujsk&1-hc)pgaQOL.!{xK)ށn2~gɊlN٤SHRv%~r<LRT}R䪣5E 4^~0n6g" :OMX, p+ULupŬJ$Z,kBbKvf,_[\} \ͥNkXdfo#EK"6M'ײe|(kTOxjΆ)bI1r٫R=x~Dmc;x5(`?8<{ZwxƯ<޳%+wۂ  |n+2Ur<}Kf/|5Uq~;J5boϟ$p w?G4q=~I징4)õv1Jf=C~YX2,Ĺ|c޲4׫Z;xǩ? a4D|v]7tK%_F4>j akoߌ!n<]\b.>;gdzo~>, iٽ8N9$ݝ8;?>_'/g@/OXidY]s&E&=V.!kfjSW5׫[6lK0Q=9?:]଩Ww_ODz"ӵ:ҺjIB!q+š3 +aj:m:aqiqFi;n!PE5mr9LUtH,M9*Jg`,:%ST Wd AmH k+.ދW!lY;9%_{핺AhqDX&)?S 9 -6WJ6%lS^L2y g}(s,9[J#GVv R5^kN[Mh'õ%Zw0޿_&8N8M\?jVr`fqG|=[Az9OG<-4N;??9=gvc%p#e n_S3Dj׳tTg|,9z~RA)4;#~{czGy=]q^Xl|3~cq`_*e=A;I8wr:;e,H~Vuʇ~PwT{Pe?=p{{¢vVZn,L;:qqNH_Nہ\N+ݫv+[51t7p8[ =@{ 2*9wGABApGwiMcwƔ,ƧyH][1Z o+F)rr*F*FsG`fw`9ʆVOt7f@ =ӛUfoytO`]XتnxN`ٗBTת6i&.9b+bIHn{K:]uy.\o;bm_ lsF)rR8묹V_[mUû$z!@Vz@76cǵ[Mh49<օos{#LzDs#^g(\nŬOzZ6Xqq?v[ ˜4u`>aM?>r,,~M V4mӕHq65.lB inP>>u|u[Q_/UsѦf|kAl ]jRrR4 B%ϥTls96\>WZjuVfE{ECo:(1ZD'vJ!!dUcs/nTҊH9e?V uccj2y. TغZOJZ^?X>radc5j\*&]dT:_n +!D[K ),o#i!x蓪)2UsuVf,δ@:yԄ)n8N)m0@-HŜ| pЍUVr)aK)J(֚|?˜G8ڟKĘ|,c+bY."ds30h eݳJ|Sl蒍7[oD+u'pmw5%F*-9YGŏ 1iANORKj-xU%hýF \zzm%Q-;][H}b lpu ?\J'p7F[9xՔi1i*u '9: G1.p1F7O(—L)l&Y4D5ۗ >%9 ,ٹb%|^AG@b]ۅu|줞"X2RT2di$tPJyx`Du  ”RJ3!lUNrސ=)8=h($AO6{hO)j3-d,8Rq3` bTL)x[` 7j‚-.Z % &nÖjh8eHI(+bFahJj !pۥ͌J"W] 5L[` jRo@36B‡,>) lFpKcȬ'tf:ѥϩs [abYDo,MjUݾCw&I)3eR lԜ" J*ޠrvU 4W OWaD> ̴6e5fRXc3A\Ctޑ(,2&gherFaMDny0{w!tY}RK can^h OZcF$5 QTEFnsRuUpHQ@(Ք[D1#@F J`cZ=`[E-qQӍtf' DTg6"%4`Tuv$+ ZٌsKO]t0P?i &9%D *V0:&%)c(raGʝ~:8x) }jжYi5 7n:G9SSnO 9-Lx\! 
f.\#iTImHa5!)-]RcG^i~?xdԆ ױ\gH:8`IFVV"f .Ň+c7$; WH\$X- QnF`r"r_:"iB޼X+\lĈk ıYX)G .Rp]D#@meBA5[7/V9XxCB-=ƙKs`0"ixDU|bnCv4wKCߨ jF60m {F 咯 `4J<0v ?XTM#iG0AjmBj"h/҇yyќi^D=*N$25,&ѹ(W ;@cx abޡkİՀyj`9GJڮJKYervDaHeJISb5<'jXr~9zxȇ׋6U` Ԣ8Abݡ>|͇ݴ\T wċ[YYʱCJ/(8,#"f]QI;]K3E,KpG \B>;=(ndUM(*1ƕeZ@Ԕvy 'zyy~qV& w͠<3(=A6ӮnC0hX'e׮P(y@K*d Q U p>´.; |첆Go<|&8d"Ӊn quM[~0;U=v&OV0l{ 9}ㆄ ^54asGHTX HX$yӇgQ492>"_(Xl?7v#9/ء'Ѯ_,,[zi;iNggWFȴ-V̌ر9ҭu_oFXJ%&Q1#BMִMpKĒ1uw3i /kE<-xb]5r."}NvϾI/( !EB]T`5 p699@x1͌+a.(ҏ^i{akLj]ص; %^ckI &^wDD=•0>/x '^8‰Np/x '^8‰Np/x '^8‰Np/x '^8‰Np/x '^8‰Np/x '^8‰NpP9•uJ͇>^8V+'^_~ji>'񰈇E<,axX"񰈇E<,axX"񰈇E<,axX"񰈇E<,axX"񰈇E<,axX"񰈇E<,axX |ﮟJ9NqQUPՀ1wJ$c"PzS,$V\\1wocZCZk_ߍ8w; {춛f0oMnmO4n_y 7y71^ِıZg^=Iܘix_rxuZԭuҟTSJtL[XRYɖMɉ*QA]oEg|K2ijj{v]Q$E(8$ORhSBhOcJ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ ϐ Xpp3JWdx`.SGƾ<;阼JR7OoXYo`}[ZEDށԭ?~cHreoѠWl☝z <-jjڌ'.Z]e>Si<aYjp !"fr^9Tڥw0)#!h Sh鳫^8:{sC.!~ZBGÛsrPsXr`f|&e6ȴђEM6HC|Xc\k~pGVO|wY=Ov'klooPfYo۾;ƚLqg|c5oFQ74^E7.; gb-vENzӵU2]C5RΥJf_܏Vhez7Kv^`E[ͅa,z\ ׽ۧ-n.}w+g,la;(2MLLAGqC,_՝gv{=-I&lx|Y <|ŲcdX,cubc`<ӕ#;6^?EJOh]5U~ kF];^꒒:5(YJUi%隯0ɽZK[ C5UGH TÔ yFdue+@q|z|g_mc]odqQN"v>KiUbot͙2IihÖ],]Vx f®ە.vrg듯\noiwsf#B?</ZrC6!&ժZFU~"jK9;˞+*fG!#2G-qy0xW=+ӽ)^Jl<I,!e9(5Zɺ3ts7؁KAUW2$G*],*JBscpzbGK=< -Z9:K*uo m:J\.'6]ag&lTVɒ&2sw3]f\(4MZ蠑C/-qϘٗzqq\jw㹾7RL\M*4HdWu֡skj#P9YoZ'!S@񚪍aV,6Ry \ߊb--@ ->@ZW㹞a{}|`b_n:z©G8ip.$eGn!U-Q[LcPЫ jtH67|^~˩w v9zԸMASS!=!Y{|c= KhYYB6U6aDo!>f,~. lj*LYV1 Vlgƶ6BKbzvwy= XBc<#_SׁLeBgFF >ebѿ?1]"n,,iG/*)% 59cjбO4 YeOIaL&l5dpo7_/>f5yf7!8R|X &'JIi`N}U Թ3'F@7EXA-D~nl4Z2>Dug&<*͹}V%6Q~^DIEmv=^~@F 0򞀲wGz 1OY_L2[a6]V5&ZLKX0a1u!6c/2r#h^&o{nͩksQ,v%)a¯Ib˄rww X޽9N522x 2Zdm ?u: Ox`n KI^E V50F^C݃2V`S:i/uKCԔzRZb+ KjMFvdqoL'g'){)^"8PWz ews9(n( RU棔t*)y Ѱ S^`**;޶r$ cnwH,f7nf JȎd;(ɲHY -[2yN[EZ0XN%pkhkEj'SiĔ=1e SW|䰐id!vdq:ZЃ3MYAʗ/"yxG &DEKUXH|#üV?̫upNGmf;~h1F6&A65Qc˖7- 4ajS<[h&th>Jɔ5b?iY LE 6բtH'p(  i :[z |M%%ϡ(uA0T7ŨEg sjƬVo$W "\V) bϧ0ݿ<4wi񗟞~77ӥлO1 E/O/M=wla .~O}{]܇ ~eyf*Ci4}s_r[ |ްHb8uZ.!JUh-?/O qX#*960DOp6Wzl_{>մ_XI0PFoњc,ؖh%PЮ#UZ0iM.xd}/L\:m?/Š㎴jM67N 3&_I()ND@DeVs(}6G֚in/q'ZXl(iRǓ 21/,,Hhj{wYߦ=ەFl>6\+T~B0 CN~ĦFBV3qqn:m*u顋V :"wm(Y'i"IPքg9R6H!fY[%GaI%DŽb \4W 91 gGvZD92ӗM3`D_kur[ [ ;𛟏\O&L\N/z  {}s[zHfӷ)=5Ю[yb~.wv_y?͵_CbhQArV5NgsQǺ$)׳>^O.7vG!65 N0 7_spYph!. =FV[5sWeb0ˇQ1,ND=FDV/;,wT;mށٟ]$ H:&17~r@cCbTR叙3J%7ڪR&ӯBH!(D5e!V UQWV%;V,c~;RόtIhh$9h Tm6olZ@Y2 4$B-%Slq%gVv~ cΜ6ǝ? y{CY]%a:b'(Id$)TS`O|V;^KŽ${5IoDL-~CM!}h`}u\q# N! 3PX@:Y_\}ʭ:|d [L^ l8;H,Ϗ^4dEǀy\ϿU}wvU3;֪)IAjU,KKY;zYn N!7?XtsrU=*k@2qТx?[M?RD9isMY*K^IUTs1:]pB"z,[lXl2)> P%9ƵĒ@N{;?M툲kpYHb:sAC\I[DXRx<)Q;l&lTU?rV[,%?) h@'rno22Kj3O@䔫?J lF vږyCLK[1|VǛ:v;tn/ϵ/^ To^Wh@=벖?8bxV_L3 swZ d!Fcbj. $JYr@==z8z D.Yde]危yo=G7[޳lۿ8՚ 6,8_WrRoϐk~bvr[RrL"D8#A@k~!|x?DNQu$MRr߬\Fd4([k)[k+[^-' yNbĀ^ג @Kwo #K"qSvaZ΃ly8/p*ČRp >aiYW{F@,XmW\ݠt#ecZ~$;4nv$rHý^C PۍŨrAR2j(HjQ ޔΧ:pK#4bejDS^"K~V%ؠ{$!A-aͣij=EJ\j|\[# AҌNx:͡:]B F2i-P˜_[z.M 6 75pF',5XdY|]|-8K .' :4bɾM65=8y׮jD l:豐huId:aզkߒɒJ)qE8>E5q-PrP.n~0ijVEG(·U΀lOEǦS.o Xy0C+h~`yw87BёNfs8]ݪ3!ԙwfhʵ*-#˜:Y2Y+]:[|qHE| f=}i+-@ʆCBk$6L佳BzMJVg rIGdcˍ:ӊm3^|K H$? 
ֿ}mtN\qV U- ghh;_E{kf珶W&rr;;"Dpw,aeAZCHdR S-aji`j`^bhb$D& dbk`-@211$֣fI(v`jð9R0MŰu})bLpr)7 A@H]\V {*:XJh+Y6sk &"h+\Or_Bq9jĨґ&LUTV@8œ{8 i9d”Mq>@% Q>dj#dINa1-6p^d o1 Ekgk=e?-ZNYɵG4p&+9l:I)  }aW~fiܲlT[\6>/V9^ֳ[MT\w|+Yv:ޯf -ޞ5pF:(yk~,6H8u jv_|k01 ai~c#d5Vv!_;ざn_KOw5~{\~t~0 D"^jQ"a.A(7ښ&qzhO,dREg8F(Dp$๏t<*Dd=|i皌v[N`pȵF ڻ6$KRIt{1=DksH+[;$N|Dm<Tu[JJEJOKE{/0yi]<[Myxl}ݻمܚ6l| {:i2!3@0/a.QrEY$ވH_\G}֥EV*Su1ZyHT %$jJ>S}`deD=u|zQI<@BH>3vat:뀅fba"!2$$ )җ0ykeoi/ݸv"Pd I5&[:YRl zSqr%.@M:vo/plU[ԓ-dph,i#֖-bvH9o,4i%LQ1{'*z{㛰1%9g:Rp\ťS)=)Yz7Z?Nα{qdg#L2 s/K]RD(M<9ύzZ&ʐI#Et!I&r1Y@xR%(@S N:ǘu))iᘽ9j{a%a[h3՟MtaеUF"2f{!7QH w8I*@ /JPͩpB>zolxڽxJJ?zoKˢZ{e}.>7:y:ZiA=jR~QsYstrF7<Ы8 匈(>mFVd_k[]VrҬ6֊ĭ5?&!;i~a\/Z-.v{x,>J},^-:>K9FW#FWvhDq!QXYZDJBwb⥫?dLl[oԪRwU8 fm%k}ik/MS5tUZvBI{\G_$WiS$ o햬aszw |%+78Kr%#暢PJΦE%td"KƂ#1QE7_S~8h)ktSDkFk*rHIcp!>PQH7ªbqmZ]!b/Ā`c0FIEnHs@#M"1?uCF?"CIhm`4{r ι"D %^gR~EHr Rp|Ep|yb׈j<e (y<f,3*_W{BA3Rq gW8X>_,LQ4y>n{OH=?Կӏ?e~|[@ l"lTXO? 3;A3m8;jVr&]r}wHc"g曷}y׏1Ύi<-cΗ>ˍ7ǫ1~z7_o>]qSM0=*JJ$舆xTݬy t#ϻǢ]͒'/+f[Ҳǹ~S4Gj# Y#S 8z' d'حը%{'#P2qHCEBUALBIݿ|l_^ꛅMԷԃ՜g sB2ﶋ Eg2L#͏`O ܧi@o<-F9s.y 0{\ q1d7ܮֿAn E(= Q9bg EpǤySyI#˧;ld,M ̩kPe&kz2K&E-W繺^qQ |)j.A&/vŮ5&xͱ"SfQb r*D-x4 .gQς*O+L_Áue)啺Gdt`H+PԿI¹J(OXEVjfPJ:]7zY\N=%2uj?GwC^G5O[kD'cv(v أx9B3^/Ɠ5$uO9p~5%,,[d\N' ]WHAk] $iA:wdVL}7$fgzD9:Ad旓՞B4*E czA?|BS}f=oUgll,QQ-[e4;VB+U_OgOw nR"ڙUOuh5>;Nצ@׳|ih?-;]Ϻ&:t-q:>O/ɋѿ |mjvy3NN9)3 yQ * usֺ!r"i|q=ҧ9hTF[wo޷[|x>l3;Y$Pz顰Hq1&TEcA;usc"/GZSĒiޜgk3 [,pqd `mg6flL T6%kƚa|k?ud-4gR-ˢSv~~ޢsVt̍>*I!"@%' 52{TuI_ש.n˴$6$Vw1黏MtɣEzr70& ,yu!.X3з_AܜiZgYA!%(}Z1^8Mڬў)s'_t1rhaR M϶H1 ;O966.i:?ޏgQgY'ZU} #<x3JNk{1>[sWվxh ޅtF<ϴFVZ4\Znm5%vdٽ?mt--N)̱?b+q,`4Lg]SZ>3ָdM)D#giTaѰQm;Z;Z֌7 ֣$n$(3kC̼ u)y<]B2 H *v[Тܱa|WW֯`uB"ƛTڸA![qا * T&22(ESd[R+!kp[8CUj t; BI|26q=ㄶ.d $ =W1)DPgQ|]-j!ON8jj*2ux,4"ES arHI$C?^HJDбSjd;&Va/ꎉ 98"Dj1,EiA w?9a' 0!=} IŚBnk K6+^ck-OJ668Lcڊ~ZZ{!L3ҁhT3Mtz`e+[h}2X%KO׆ G *#j)v 1q>&JZlkmzfGqH6Cu( s h|xw`=a yp59b%qdnڧO95h=(%B[$1|uúaB4&VESx*ZZ06pS1d6ydXcWھrdyۘ4quv2ݾ'$Fi)S 3,IIhxmz~]|MwBf[7c" o\=7w7 gIt܁ Ń/PSrQXib(h+\zj}p%ڂ ad^yrPKM\|Hjrs x!ٺYwҒb"09 bPyi6 QO(`#bSLisfHkNڷF4 )"GscNŹv"ymH)aAZ5xd͹ Ed_lZOYlK2ڄCP&\E`hTêЁOnɀA/PA6k7G.ӭgD+|dlqH*WjqOc>0vLǣd E qlbٝ~9T:A@Ikir1l>νz]TjȺX)XKJGeBo7]Wg:q.Bº (o?v 'zɽ(/83hύ&jV2ϾYIuՐ)ALc4n4}~@jo$HXhE'Y@ۗ oǫ`!u}}".\Qf:*_E<_w͛.;r=N/>?R.GGy^0,-DҸJtW->\ _]NB__\%ϗw /-d1]t:᳗sbZ׎ Oݙפa#)dx]ǜJK=~?V5ޜ|[Z;;2ݎig;Vp:O8 >}=,enhi`xk+O}[}3!iH`y[b3ƻkr*5C7' n $^>ծ]T)iUbpMB!ec`fB2bHJ=ngs2>| F)J~LWqit<'Ǣ;zw/_eEwhE6› қNvO`erJ3%]x=t:Z' WSx_n$c@ËԱ{UGYq:zu9_@ ~ꌒ'4*ڿ^yh1oF4ˇ*J=2w}htNb_Q->PQQ:]ǭ_ԳNn~ةҧLҀ@&7}&9aC2ɇnn r/jrԊ3-#˼#5Lau E;6s'Y5Cv^xh_s4[ɯǘXo?{֍ OnK*8;̌V& [,j('C6%J=ؼ4`~y-%]ļd6ʡb{INR(<꒒s|d^F+e1☵nv|ϱ5Hxo~l|yn8n=x3LvhRhdatS-R@VCD+(XUt:G,f4T3t,u j呠K ".Keo駝ۓQJ2>vY@Q6V}t~?=ʞχ/:qzMQ'VfPAR6&D.\ cDi"iJQd$7x@e6ڹ`:+*6#dg"%BbH)BVGn xCMN>,M*:c[-g/^qçpO)-rVjW@&OuMg S.u d}%{bH7-D0))PWⵥnlp =@S앓sC Edftƞ \6N.ZjEw_L" ~4>n$nDi=ۅR@ЌTL)j1VuPX Ԭ3j4D_) .S*?ԚL `+',:`c&lh@֢-\:롫efA'l\Yf_՚/,h5a7>?鈪ḯQC:e:yez?;ZIH3SЗ?;7Wc9CO#Untс^f,GۀU?NߜxO]ٟ;yf#7v*3/8P*'26hDǹFxvIb=ľq@<~YIyxHIwPycŤ*0gvWׯu_gL4}͕x@Ŭ*~.ZwftrUw+N?t9~Nq_:l4W7u|Iw#s#"Xd7vٞX/`Ѳpncav@c[&S}6b{J]"u4y;M׎58=5y{{0{aK4p]ֵjOd(|kdW$+DJgDL}~fOm]NwՆ `ewC{{/^}_Z(Q۾8=ER7Y#S/PiRl 1YAd(&dDх8eaz *V cdr٭)tmnxy9_On_xB|*K(6Ak.adSPň^KMP=5RA9Q%[^L| ðnxSў_VYjr lˡu'G:Ѓ7,tFR$nbyB,.uFǎ hAc -*d5d^:}4ܜχ>h4I6u 0GZtRƅZ!)%P@P`5E$a)Fz,cY9j/>u=nq%%;wmƧRC,O)!ƧpNSj|JOƧv|SjX4>Ƨ5>ƧZ4)5>ƧRSj|JO)5>ƧRSj|JO۳;iGh£Qx4 F(<Gh$5 F(<Gh£Qx4 F(l5ee.r}rVY~?mIt!GAT RRgt")L^c$ջniRբ:AhA Q, z F5_ADDu ņhd"j2^x-!ZhME{~[=㳞Aǁ"-T/h, j4"Xpc Eۻ$#SH?EpҔ|-dyԁEB.Y +-_hutO&gϊJo[ೈI՚I:IEbKQ.eJr h(eoeaOnCd!\bqM8>Xo (0(L @ 
5)CHP\`ČdAm{nrUмE5H)}8Jq¢߿_1}ogWWbb<&KSw*o&Zo(/;AcOX9ɻoK~3o&z௜E)*r}uEٷ߳8χo甥yy01QuRxEauJJUR:u0A1C7L-^DhL#f3H!$HF+U2Lc0KLrʀO%;ǹ4j׾Nv} R;@}ׇӭZ44L@ $ =-N>`КLwN9 E15 I).΄TކFU[m,8GnBn¬aW2j6si$>drW`*U@ H 5!kcڡJ`|8Cb_C(1F 6H%' %+>krm!  yU];bkgR02"x[ D(ą2ZLV3F65֎yj Ֆlɀ!s8V#6B5ۆ0U-i=Iq6MDW ^6@BfK!1N "s1&:Cv@xb,: /H߷$2C?`U>Oc-KʿN ھ|%4}"|8PfJBW쫴f2U;}!,gQڎ 4ճc5;6RZp)Bh" h#^ mT։aA!WYUTt"܀k[UP,Â+]`NͪΐJS5 #!#9ESCƣ_$HbدTOJF&P@jn%3\jRinΐssďȡ'DA|r kyq^(rŢZպ"SqYA#IM*aP3w1TƐ$Be%yޖpoP2~ .xǩ{YpN,eitGYkԗ±,ZXm'WV: !Um3-=88gA9q0z-Wt 3ЄB" 1Q)sj+X颊$B" M_pyE{3F"]&hW/~s8s fLk *7R,nb,IY#@y5uty<rT8oǷ8'sԍI-^ped+HjM{E#c 4$>s@k0|5=H> 1Yz˺B/CṄ;XcVs 0F*6Sc9Q"$Lmcci^jPt0vhKBhuN4 #!:}"s&7[T*& L2Y"0PH\ւ)gRqP\*g`~aS!s@JLA5r+}5p0Β9\[!xDZO $Uï9~';1&9GD0Vxw?^FBb9$\ciċrh!@Ȃ2V ɖ@k}f 6~Pt]Uv{-2A~RQ8ş¼Kw-1O/w9>]۩OOi,HLҵ϶IkgvK7'zD38r=9h5a1gab<9ns9}zM/vGp/~I{uN P x9ŏ󧋛?X\/Z wdih^?TbN)|TYiO$\ ‹VQ)LuB/@S}lV_VCǗ1X)"v Ӊ6X0 D0YFI@DƗ8C r7 (B >3pDUVۘh{9Fg0.Rб_~MH"$TI0Z22OVa!&AΨ.8Ԅak© gr.H:hKnN2{Chy71+D=#~I $y+`edeA6i;WsJL-kZ MP(ɜDˢ"'srJΩqr[<ʭv KL2( CN(2`sEh(*"E 2 Zqޙ6R~ؾFl@7GB7ȭC"&zɫxtչ^ %3O):bwhdĢRIfJ!mیFꤪ[,)-4hɝ<񹏄 O&r'փZQ pLDqLG AX Zt01V(r< P>^^c3%g"+eSqcDυ"bFoK3egziF[^T3sRmr>^_6Js+PbrA1PeAĚizy7חzm&h#nogNzR:M˶)# w7JݒHf(D)rUEEgϻt& I;Wv̳;8;쥏mףC +kV].`]dڪ[>*uį,ϓ-<87ˉ_3W;Ǎ>^6˜{nE1֊6E gh<ұS3{g#9./ZLSР7CP:- },m-Iq,Xڽ}7r\Z+rwp<n=1%U ̴D%=0bkkh>7)MsՌ{]Xa7 (|Sc~<r|.KV5S[{wڶ)53E}\~Mtk]p"~ĂC~Ԇ0.-N` K"+!~`{uh<@1~2ߟ^ݴd Gәd`'BuJMxS&ϣ^%J}F<>sE<^&D섴#a6^yVx$5dmVih/a*Xq^f]~`:Z=iէk2[?ϔvzl׫_s-n/uɝE5AFx&q_ߵ=rV_png{XM7y]|KE!-Ɂڇ}jvz1.^sve隅w.̹h{\xj@2z,FA$#-*DB.jvF RƒiHm|,3`!qɏ[g6XZ:xhJ omm 8f8Ք++k7űo1XJ~_1$HJi:f~Q$P be%($< :b'ޗuyDne Y52gYuB2Θ11,hN d; >a'VŬqIHkwDsrd]\kWw55L/}aۛy@JpBhL̘L-J#i @!Qᔁ51GXH$M¯;=ԛ*8IL;brʱjr,qRX#sdFFXkdd z=&:%`nͼ̈xY7fY2F?nFoYB1A}C.I2S$,B̤RﶞbWPrnj d"FbH߷0CJ>M_9Eo"LWiITqB=Cp))Sw wiD&ji@&K/uSr5H wXlF<٦l'X#Gs\T!ΐ`A*ڧ'`dO==#z{!?)"N{ē]u!:Hw\Gއ3Ȩ#(Z6}!E/fXѫ(eB)-,#(t~8FYK=^ qXTmu×tDƕ#*:/*VLpQY7hih~Q[3 m@Vju?8h>bd.1TaYJKSh紗8ykJ1̿IfK?w^:d^=&CԳ€ݑ' }q=6z\^A+d^m7_zҬ+Q{#kj.%Xsdy}N'9"5¼ 5 WJaP9M宅TؠLX(K e$kMG(Y,f}GcGrS,pzT邏7Ѯf,R+g/GRLʝ ת\%|ld}\J7N}׺C;Fɐbʢw̝{wٍZ}0xQxژ{UrFL݃^-1ҽkݟHo s hy_?XOo+:9^TF\_;\)nt7ơ5^S*G- Xoֶ&NPojIU/2s@/a?v:kAsփn>*=>~wJS L2lWܿ>*c?3ɲDIl>>"'rr$Oz)4Z<G\pǗq9hDT|p*rNs1MKde+R uvkƂ̐)ŁoP(6,k"šNjـ7 =6վbV 0G;zceo>4`&bǂ6s-͙ Y5i= =3ŦX-9NnN+C"RBVW>.п X.AǑ[]ս3OjmjRY]pqiGԪ&`޽;9^{@JF pV")܀)M[Q8cN)1Z68(xo8]F^s=a-y28Z)iXv<\Zu n9KK%RhUr n&j)0d@o?Ujw ȉ\,tYOh7:uSXJ7vb}_̗lTbئDV&ioGeKk,M/ {n>e/Zq~7Ewz&e?RW8R D*gIՊJ69<|)oe#A~՘Ss/˔VnmLq87'CsZW̰LzZj %Z[pvxL"`y\LY(./.ZRq›ʊ?'N# W V_+Ў뿱rRЊѮBZ3aԣ=Ìi Ce6KsD1%b7<`cp3KKHd)A~J԰|"kj~-1WVZD($tRyHh:RW#NXLq>K x鏏q1kl5,]`?p>| 5h֪ %IɃڇPF־9l|` ʠ`!/ s.)iNOA=RҭD.yq5,PJmOh>)%)'<(Ќl ԝUC9|bm^09CrW}_f0}S& JnUc:BLs) B(&j~U*wA?df O,-ߢrz7R`pN΍BAVޔ7**+;PVڙtE/Onotrv %/_&2$X *>5^`Ȝ@;vլ l/\m{\n4qB[%wma֛&{蹑XB{Zo.nZdj;{If 4cgbͱ-,Ut{[Z߻mZj̘SC1Ϗ'ۇ>>$«.B1E9[/A=Lb9蒻@%o\~GP eŪl??Tn(|T^V>ۄغo:R9]Sw@bLqGZZH~o[4\_3fyYsR,^o+febu[L 1kwyxRPFEIT_g,Z 9_Gd()$a6Ry+{q6b5+/0UUY'y$T %z d<+κ\^y+3?j߆-+PyJ /-g\{+ᝓD Vt#C"6ӴG'':uw֛ԟT9w1c[VPҲgO-KWSet<۾eŽʲ ֟ܖ0%|u;3 Yඵo{R6Aςvg$g#rt [#B*2l?E3;",Qf_r'CrnRli/uzfb@mIN7!" V^dݐVq"&Aly]Ϗ*O%rإ[5Ks$bY8=C1o>WџKt7\KAiS[VZ 6-;s ӈvfG%!a)-kaZ*)$97"@. 
iA e%.cFa՘~ƺ&KNCCsynO?XTz&Wx1sΚ_)[oE@J M5ÀRd cӎYbJ #V誥-I-q0c$b Ad d;ksISr5H 7, 岅q"P-:gW%˃e^叔`$Oμ*cV*0бa߁y'Ekך\izJTr^!ki'WB}7G,S$νBWWݑ]՞x82{F\E7Se=bL׭hk*'Xf'm]ڙ#F]26cJrЮ[J@;+nnJ+.]^Qj[8+,,L6 j~E`/R97_}ٳ|kTګónnltJ)sr')*EEgepJy8[VRXjכ3rzzNъJ q̘UZ+~1sl"Z|S5f􌾛CI;8Qض\S+=֬ TF-k'hp"*Z*uxs7.T!.'akbZe/m8GG0)A&,3#.>M˕jŠ3=r*fh.N saBvDVQk:=0)fXU{]R 131- \P7,FQbq,zHMV֔dS98ZkCm4_߹X!=wuj [!֋^)~`ˮ۷w /R T;9X9UNMx%rqh)Z`C4 զRP+{̿y˄[נ4bQ Gxܸ`gDyBuHk;pmy9mmLVD?֋$5^ŭeU<`Ff^$ ƀbuyH_Юx%so%fXCH[U.oMk "o׵4yѵ}Ux 4r+fLj aP1H&EE0z j~#czc|l͓UX{tK5BIi\]~W^В:zH9޳K&*}4j-cv2xG#x9VG jԌ2@ZF3 $8 nL+M>|R#L" N|`V?Nf8F@߳#^W"$peemF 5sda̰\,쭮HJ\Zl9"6rBWD3|FV,s% vho-\{.lC9.z3*v3Q >1U™~93b{RmȂS+*/oo~:b%U`k2Ll,aY[͌^*~p7ٙhztM>ÿO<=£76}DdZ6k 4x R4Yc#K ??pvc+ld*R: J)^|}ܚ#z+Dp#MQı()G14*GGQ d[Zq$1FoT1 E9'X DzS[HOl\{<\ܩG ! dŸ=~oL JYM1]%QASޏ>K4 z)l %zG.v1JJB3*_*`G1c?vb$5'1o7t)8! ,9a*g|%|^>Q12td8a;J2W([%T %z,+H:.֬|L5Ĝ*ٌ0زL-g޸k_+iFk㍻gxqb,AFc r9nvgۻnۯ`{%6eSi b=Yzݝ9L78d4)d)#s؜DZ`\ }òbo>. Fwe\-5.UR> `|bէL,SIO R>Ợ/Q7im}W>Y?比' K," р%b DQ${0oFލ/ 2ɟy%qɩK?o-??5ϲq[ҟA X €d[R`^?<[Mp[4tQi7̎4{EQy~>_"쌥|#eu!nsjb6oeg})>wvL6P77Eɫ/;֞kez8K5"y(&\9/b,CS!e"E0S]8'()He9҉SNS`He&xiteg1llO `XJGk5J,5|:XԂkdQɥdo쳤Ng&Moȴiخk쀻lņ)AnԄpbٯ1fm3P/-.{]gUwhqv4Et5yWi<$3A9](cE]~XŒS[fjg% 4Z.w!&#?NKF0 af0Zthō㷍 s!̚O6]y Ƌr~Η5(Xar` Ę1~2ৈG7BA)cM6{.hV_qA_)bʔ8p.s ds] DcYuDd,7S)ҲߺY& LjLWV'CF_~#4R$Qx*%e`- w@GDQ1wrh~3OWϏ8FTBfQrH8挖+m<輪LiޛgYaY4Ld[lڡ v6=XI Y͠,c=4Yϳ}G֩x g{7JC {L"X>WBJCmG*(`e(2R9憦$A2"ËJ /lmNjzbC8@G!Qh"T[|^,ags(L7(*:n .O,4cєLF]ۙjwxd/1DZɩ/k X.Y>H+pN`{QPշ:0Ϳ"J7AP^NPQR`H9ȣH$\ 15{,IX))4MrM_- zaSּ((ۡ ¸lwa odWRJrv&EHf:MS0rx(Z`ܴG&Z/GϓxOi98iP\_MIcE=N; C_H(4MY/yYEEA4r"(o4|#G@ehpFZ$J:ϟ' RơEZi8;etƯ^̩M `g48+ڋ E8}ry3 R 4J ئQ.)$$ҖF<52$+$/O=R$^$SZgɓVdNCZ`]j42 P^GJ.'E=Cghwa=ĵt՝s􋑔X jJXoQ 6[)frD_/5B{42A -' <<:ȰzQqSr׈RJE qL\,-0Z`T)^)gR*/e3J0K֡ ^y8.I(\޹D(kX󁻻^,7'h DE5iݵ>5>^ ]Uq).B18W  ',)}h㘲G}@'BD _4%.&;~o%R8|Az"qq^]PARLwp %4B` DJ9z[`Ы¶e\Lj*ADZfяZi!|RXi)2H 8myʓ<, -:cܔVD/63]Px0P8l܏VS|>ْRU*/=Sa;{K'åy PWV_+1ΞIY,C2qO3vɴ_O. ňkIgHoS>Èp(~qNiNq\upz>}6`7 a39Q`u bD^&ćNT,ŸLg]G[!|]*[/u߇,kAe ϙ(#ZU#k0)91_nyp<#ĜL*".l*h6ʮω>D6sʨ,@[{ːyO!&x8́7|63|yc>Ee:-Ce=l1M-Պ[A ~Ip (N-]#ʉiUKəXӺR+~~P%]R}(>Zk.Pdm_uw>%)WAuT3gKM2`)~n6Z7UNv*y)]B0qm`;A;4aLO|.JPoՌ'FT + csȣ:A S8@ ^oRHío a}, |XE :IhwZws؏8Vbr4\]K SqJ*@=v^x.KTĜ,KB䉲F{{Et/hu/"+N9!uE3kM2'M Ȯ)4F<,U_\JkZi Xm/5=]ѳURUr}cxT2Ĕ )9Mzs{^ %%Ll9<5?0%Z皺5|mYt$x<'u_@lXw.@篷QI#Mg)l/{#όsٴQ:7(\`XhnMh%Ԝ%rgS+vڦMo9Onkv 4a\z[Xa\)[""]s[n7Dq *hbEw9CxR  i6"lPQrΉ9#N5pm/'Datʗ$Bž"\p "2g\ZTBةb.EwQg.l^ Ӳ·Zw4L>Lgޝ4 fz8)xaOB^J@#)b=]EHֶVOu~fw ]K{DM?֍3,cU˴>=GdDP`{:cj['wA|.7:]ࣞ⦶ְq.|'O`au=BѣB avx Jв>TIIntTmO mWl5w(olBAvE ss=kty_`8.%ܷZ #V}XUFB"lm8ey]xC6@W p# VHo%5_Lô .^$TK.^~hphrt^!*Y8w>C>Rk(R%\J՜/Z9jK}$A߻* \~ ܫHDEz/ l`ی&l?0a i%W:tZtgE lq+ vlkllXw[xҜkfI5eٚg7}ñoS-T{9fkh 6K)}h6LU@ FjK)gR*/르Zj  Џ(L72ϓxGxz^7^aݏYOK-zpm<\竩9Ċ`o'.Bo!@@y 3[D@'#JӰU gA n`ڄޏ% %3-/m*UKI͢_^|- \]U%[g߈ȉּIT|; IbY+^  nW3ZmP+un4&R"4T @i&R`,c0 L9ym`L0CҖKvys1RC䭇@GC#´#ZlNM2$+$/aO:"h v2F(b)bB\E1J .ID^%\%{vQNDUdWQu9R+Q 6i.ѱVw->^jM-n]JEx{j޿< Vo^tql~MT\`ǜ^#d&*K׿SZ0tK$'U O/H۬؎W?۸!zvPBQm Jj]><: v?Л1կ/L:OK~?[箾<~2lt|%)Vz4iv''B[f\6_yÝL',/_[ڗtg@Ov(3Ə])[F+1$߅!,HHt {?B"S*+RLV뻇>$IВ~K󀭩hyF݂``5a"e"-8(dS98lXaK{tKojEu)Ij˲Eٴ$ډfđx|s`bٌf"a_w,,18,aU%ʱqZFQKnF(%krܖH}O@85cQ 5G,1 0$! 
K,QZu9_狠P05Ps,(HfLn1'uP  %!%e#4P*y1+tsAS$w 8s J?I^<0Z8sE1K EH`a%dTRK7u׾l؊c zt &i tr@9Q eF |"ט%"W-* *dJcezMʰ=$sm-fZӚ=Rq XE 5PEk yrgy7_=K "H(N=W2D9鼩+icX[< Lo PC^<{.~|LJ@W.x1k9Ov_777v}./4y]^ߜD;grl~WJ-Z|}LZ0egNK(O܂z|m q ac{YXbDh3=pw^f-U>xD?>z<ޥ{Ҏ6wLnq/BM7c=5N_Ee{7cNE˿R.fQZW/kuv쾫}DAN[ƒɜA6L2g2'E&ȔEn6 eX ˼:y/o*r>ן/wSnr7wx<vTq8x@[ xO6v\8?'g=d<l\J7٠:%\j^>/yOf?k-KL򣏦DPg0`r?$Se78wCl4$SR:*-0n~n`%OR6fV& '$K,QM쾩UUPXmmoFt7 PY}X,Pa {ߟ*l86#ice͹k{ ǧ $h'pWa(,PEYvoZz>8 P<թD`}Rq_}Mag]#@[)0 jo۫kT]#ySM_b*^1xj]G VLc}?vu'PZ8, $ZsK@zObtD$ N,Ӭ1o/9=pz/cw6ӥ(v81y(9hn/FP;$J”աD{H-q :0ir8RrivENrhbuRSAyFH4 Xqi7>@XcXRkwkPY5jjm6 ji5)&e& ϸf-«(}۱ޭz 3s3)}SKJ?7{/HACe H4jP\1c_GT;5.3s<}F1`XW_}2Hj L(VI 2-80Vg-4EKoB6+44+VB7UK Qwm( r~WdM4ĀPTkz:Vy ͍X#( @r]CV uqrZ+~cέP/Qb.Z9KLJ!AgI9?`xZMD]*U!x33gY)XT9\֏OS;Ǩ3%PKpbLn'LNFe)F8 )ISi%О>p6 .v,?]9 6$칙kZ Z`Dz֖<3(xx6[]E <֝L~f*Q [`ǰ ӆ\s0΍B PNL(Qunojh5.WL s'=a-<^HaHvib̥{`L]ӷX&FJ[%`h]1$ifz7Z@m h'ˡ7Wp)v Acbqθ)GKt%c8U6V~*)`O8LB>*I,Z)ҧH("oy#~y}JгN$4| cU>|gg\hiUO3J4gPƊfDž \vfhe/XoA>kv-;4ec[Q; TU5b<牨ͥϡy0[Lt0wx8%:gmUT fawRo9gR쾩Al KːAIk*MGմ=&'g[NՐx̹Õ#8܈ ~;{BDt(:bVVtjJjԙ鵙\*7)yqDxmH[c&׿_:>{ v:bRܭ?ߨyP1w -Û!2ɹRҫT!{U"d.ԻKe[}?w*ݥ[cDpZ{q(Ǫw!W>Nu}ђ˭$HYeGbG0 08tS/<ʱ<}S<Ғ__D szKלtVU]rאOp!^_3Cm##>a3 Am UrnqKg)s^b:z|=q.\ Gӝtim=,tjĽgg.j0o.usssqo;ǻٻ6,UaI0.C@0d1v q揌N1/)JᆵGJ3ALjMv7ջիK:&ƈcM",5RcWEMKU78E10@пӸ~CSx=` -4:M77<Gp5ŏ4-TUy;54mf}F K_)vv! ez4w~;ۃuK~6@~;ILGt2ڐV;qbqr4X0Lр],\QCm/m8(~#@'_R!ς4@%Q13p)\]$ ,{iQϸ,Ō ܗBV8́Ֆ[6g@gOcͅg,3~ONwOHw'4-$+f _v,6jlsdf?«d BVOu'v#s+&zO94t1O+{B)? |Pև699Ep!:_[k9k_]ت?N@M(T:-PXS:z:뽟olw9BTna`R88߻>Yeed~/[M+B> kq&! M[_[?n#'(ۿnM؁Z訔Z <eIx*w9u$8 oܫn&h!H3" (cDI(&8:D4S1[֝u"%>aTAzid1$*lrE 0|h:GI S)21SJ$J{;&贲kd̴NyXKZ-(MNYDRz>"%(5֜H5)GG%dvazi">4ZuZ6=@"GHҡσ*" |Dgp, OYSKxP:i/ep)J* wN=|*^Uzq!/Kˤak󫪿0Tk F0F!~\,4kj3S`K;$tuqQF1ǁl%(FyVYޗ!x s)1-=¸(dtn3۩DF]Zŵyοhuc?>/-MF^7P0`8DFePA>u*@ rF5eJs̞#"m_DwN9ܶSy7G~FƎI4q\ߩbWwAhb~SFٻ)=#Np#4DgAd8w{(#Kz/>`HR+%C2TqIO?<[ -"KA7~=gȱx׶> _0sVJo ζE_~{4 cUL`GZ,,sNuO@ Y ɉy:KYfQzTĉK02}aFJ`Lr腳)XtpĶ$y!Co5  O8atbNƅ5ؤ g0EjʵƓ@K$m 9G8-ۇtxpd4fwG\sS4f)7 91EX+a7'ysynVA^\(WC,`"q4T(ŬӲE5r ".0`W{w>8ίݛt~ɎFV [J9Be2 T>>B(YQĤ+xm [llŵa(gk}F<7()Rnw?6mmmւ(u0%r)lȎA "JR-A3ŃB,9q,26Mq>OPf%YwDHqy]MK2gpۼv`$vE9RP=Uc6ĹK r '%6fII&I$ʼnz@@/9M)+=q' `!& ē^xrykE $1'x ;J&TJΰmĢ%kCSY}QBO?R1Oo􍦬<őK(Ǡ;d'a:PequfZK8T'+f0N۠X}KE6Ԛ#Ph0HcWͻ=IR;?!(J-"L 8FZ09ZnroKăZ(~g#-m)))vjY!@ܵ8  7s0k2ֶRc"!,cg*)XF_4\^o(-[ `҇5zhMY#_ޝl*Tۼ&7xuO5pLkFדae;%N{d2qgzs?,̙-!TLue=vhvn^ ej-4tvu@s(Zzt%$Oyfoyw)6KFWB5HpoA2}Te& GPަ ȧo{5,oQh0̐G13 FθYM++z?}}W)ReCUT0hGGtg:1:y{dTV'c' U]ޟgqfq*ZnS6]"NyWiM8]M]ӭʦӫu瓞^=[6y ua3Gfh;V]dი^$xڑS>;ë}6um_.ֶ6MBNbF\PXMue=_ jimU jM3+%Zg|_U,{d*:'j b(.\ 1szﻓ߽{]oɑW~H/$  Ƣ2cԒlg(RH2GȌ 5XOhZy?\&$ ~L_z_.cnܴ mSrs8Ј F7NeۏB5Swnns[[mUqۄ;Y˵-b^8X%9ǫXE(0}2C"!C1Kͥa|f&,'*q+ V>uaϢ8/Kq)fERZ^ïmjh0AY}yt=nls,{q,*?%j {շkHhDLɮoϣ/۽Hő5ϫB_6B}Khjټ.ͅmn=ԂF4.*(cRVI>X cӇ$PڞP*Gި/Mr9wL<ŕ~#/Q.w#Ȓ8 "Ő/e<۽ :=[OI{IJ*wI{?3Jlv5ퟟsA3 )ߗh{Φ%pz? moveU1׿_Tls 'a]ѬWBt?w|)wnUBgć=hj^@ry"??Ndo1{~>V8*cIXl*' RvXpsMٍ!?h|~^}3[2Y+F ")*-m 8 3ơ 3d\rZk("i7Pwk{ƅ>KB'.K舗Qc e$ :$xS1"9{GR;ˬP%Cv'PnK=;=^iwn28 Ր{(E55L4:cV`\ě- - &ceg( *yZZThiЊ; }HS 9r1$eF"9}ycAJxHƞ$c g5*fL++vF wopcCN(|yR5; S!E#,tud!V r([: @Dȑl DopIJ:ȒTMO8t"^S LGRCRA ڏ4l4Yt.d"&_qFhJe,-)[rղ29"3yG[?~JǾ{қt-q2$< skVFS60Ԩ2/O D! 
Uſ' eQJd~rn JDgMB8ɵsʕńI&e \me!.{y-'K\X%%p*_b]wj@j4MjS_qtQ\.pV(}Mފ ^D傑ѹZVխik^GDt!eZUIkZrӓ1+pYȑI!!CoMawz ieCԑÐ%JLdyJk".\ 6<XDNB%+VT\9erY;ĉp{@ZN/i:@, 5$*[,w,'> QXBI&e vzcY͚ol=8L] Tɘ|Be=eϟ_m` U$ϒ ӭkD1 ikl@lY A5GnBX2Clوˁ$">QV^,yz(!mų-6A- 9R(iD&xFeEB]&p/,x+dlf킉 Iy, {%J1)nبHhLi0=PnRK>=MKMw\0\N3xgk'@{m `FbiJ5Y4$+啖u= dv 4Hkp:^Q 9ϑ2K-$2:GBԖk2GIHzDfNno}=Gk8;VM898 ^ k&[6[+(zCF.[슍Av@8`6e߈l AS+R`DS߬dY_WQdwO}U᎝d (Aq'S1\j=b T$`9SIY\@h0j[@αDS$2IY%8*,l4@kqNi!8 ) ݧ/36>Δ +/2PIl[į6҅vAcXa<1$ᔌ![di܆.&s H*sЁ \xvy>ya7J84j\;\Jr{W*j_4 wuk8ϚDSi.c슝]$ 3?eޞMgt.pϘ#Ȝp^@zk"jqqD!V +?=QoEH.chϫD$[=ܰsΧQ^/ܿ7FA]&_Ѩ(T~@p@/Eel~0iܲ.M+E}2r`xSݝzi:_׏EV{M{jA>iA}(.&_[%˦ dgV_O?eXu@p4 jzK!a ;"2x`Z7P5P'P5=k"/|á9#Px-^:˼B4 xq-5WA:Ta: f/TR6"q$ u&r7)vֺBaL+! +trsXϵBXbp.O3AA3hhkUb#G(_ARi+7]9[ nA&1bA+hXO?ey+U</q6hnXb#c(e}5bTzks `+S@@c/,q~^syG/К4 E9IǂSV){뿉{ͅUN E"4I)qФg8wp1Ҩ[(,Bq0eU(FaFya t8(EL@ܗ,Y ӐM_aN)|0ƯC+ϗ`!>E} ,,$iuB|?H,R.MbNNUKB)ƌVQR.йݷǹBćO+G 4\GXۧ(Qz罌(=HVhct!8oIF'tkiB0ʀVN9bSzCŜF7xS FEWxBb䒇s0i>|T)1ykFM=gX'# P҉+WeU&i؄ޞ6_fд 9pqm)0 ~okxF6" à1 Tܑ7\F!o"G #z@ 4u u F@>D)x8ju6д䎐?'z{LU A!%0 v8ޖ=CZ(.rx!RI,R C[a  D`|$CvOް_m t(C!;/ ?yu`M0#cqO9X#o|?,LX>)kA&EsOc%#}7 -FMt*6@QL"Z+Cv>$Ia;1ԉh[=pΧCVii+9c%Y2DK :sqe!x 8,zY1z0mbzT٩Cg14W&T]wtg͸ak%}9C d,%tӬ k5SaUD؂ʳQվ7:n㜝R=CʼnD`hGV?"9,7'#Dym_kחxKş$4Y$Ӷ^B#lauP Vc9}](Vqv: Tl[G.o=0!6KӘrFJok[=%d{D>CDa΄QX qJD.`InNʎ%D{،$%`^%i"Fꬄ yfBAӜ%7"D'aHX%Ы֚l<&Zl&ڭaXxlYh>!R sߝJۂzxxoDs&CSQ*`tr$d5y}&B9ZLA+j19SDYMk"HYP'I@rbNTd.M2^eh%Ҏmx(ό"SS\2{2x[Ѐ2E1?1eAd}yK)/µkDçԶN1eX&ˀCw7oF?5 r{`H0$Tm&=;BqAЍy3uPy)Ӄ٠n ypNkc>7bP' k73,;'F@/\p7kF/N2{90I0"f>SȒ|D-eɀӞl'}r{,c wԅV #lՖא2x2Ԑv[9`xnBfurPXZKޢĿ|4!@>1aT^7a_ iq¡{3*/s eYL߻dH ]$'Z.W;'#T|/4b:82/hnw݌<ϋPf~z e@޴RB94N\3Bk>[B*Sss7M93ع׋4jF;W 8N#z.ř[! &Z"Eu)JQvh xV+u^"wソѥ4$ MD$ < i4oFgz?f4s0f+V4y. [otJ 娴؆Ih 2 ?kFN>K '% 7lo7_@"Q-m'DhQWN5Mќ9:>4PQsҦ>FDϸZ58ii 5xs܆uZ^3,Ԕ6#PƂlbO 'a9@mo0B5Na heڐ`Uvj4Y"nvg(|+Oչǰ# Kc(Ez캨\m(o k64\[F|E@HY0V+˶w9hͦǁ;L P„~6$EdR )jehڗ nI0-&dY3jc ꓟhIC:)ɸ*8=Ikp8e^=zbW>)S;UMA,AԜxdR!i]$yd=Wu&ջE B'|a?Y`KDa'GL}tp J8Ŏ6cBFYvbAAYZk9ڰ9:щ1 XdS1RԊ1\DX7Eٜ<5uQUk#с?a(~  AgmlA'6U$t8gCʬ 5g6;N\'Ȫ[H޲$(iZwANU ;5&wH3uYKV801 92U WVNM ?p2:GWXr4"j_[U& &ijXn3bHXFshը^ zϕ1R+ސ$wFheQlL`=Gi'<[/LӻKK:{{T|FȮkc rv)/?~߭~YrW,S' D6.1JVVc .f#8dԎ6;2,n濬 ߻6?^ݮsUz"F"޾û~Dzrs?Rs.߼~ӿ?rg{j2wl]zjΗ_~nc֒N:s6_J*:m~k[`YeydXZp￯R4ߘهwQW~ fp˔np~&e֍v3YG0B`r{m[n_q2r.WcM:! >],伽hq>|~4F;}6N6/N3͌g_bWgb1=5֬O7o  U U_i2d@z%]>Ox G%UV*3צSlT*n<ùbnAŶ/v?{ϲ_Q]{q'_ jz>FSY@tʐr9ꊊjIue ޷w0*igVTl$`S&6Tʐ3X q+pnכZij$]^}v;6U䎫 9 ؆yo鄫| ԭ }VC sJJ^ѪZU4*5䐯`r(uuTa3P2AimkQI{PAhЉ^=#*#:ǖц󸌌G8RaR2P%eLSh̅aIk/uN|"4T ClM1'`KNIh}ejĢ4k./b<.= Z$c4xdq%c$\}tuakXdLq>f\ʬjvmU2&dz::$Ve:yp&Cq%rDhҤS^6%SY.MD^lO Y6Cl==e.`$VhFֱ+X]X=Fʸأfl=Ss [3ݬvR+CñlvZ%d8^1.>>}†CshjkVF f[×|uyv]'&Y5JVb b6J2]I˖ eu~'{eU*=lly6^ !`nيhu|4IB-LI%IWGdu2&kKX-M7>W{ZZjލ&Bh8p>=F;חبg=CEKFpsXϋaŽ$/vc߂bk TU`Chg!`0)M2H D(DJ][aPB~ S+4(#e{T?SwhvoV,;輪b&BX3~LN1$&/Lˑ btUʗ,4y)XChѕ9lqf{^&>E7pts}ۆo pD{ v][6{˺ɬwZٲPBn3C[X.-&Me{٪=J%&PmFv+ebQPbȔrNVhBv#ig'X[[䐮n!-SjJ:jJ{TփR$Xr ;(,v둑A =ZP8o(4ɦM#X9D-6Ttgd474?7 駏#XO!,$! 
]U,Q(XRz~#6ޚu2\_ug98_W-epGowEn*Epl'X/E)K9sVOŴ#0.˼~ӕB|:^4uTs:Nl4x ^ /m@zSgypV[=q8\uqBjswxW?B4jc/wnvWRȮ<|ל?;b[ Jwnq*vKYe` E`,yd`bknɒq[%}Z_,YڊjJ4Fp|O ؼnQJOgG߿9*WF:b֧CՔ\b l5X3s6U\?}wf g`lC^vRڿeZ$A^x1/ӈ.F +b,B/t:q ,lSXLD,meo`cGL }}4e2 fwz%UutQ㾅ʺ7rŷrsSW(d-"bQg) E(D29  9Fhm|q~=&\Nؼ#lv֛,ܔ[^~x@B,>g2(djCUXrgеo&t'KNӝpNmFNf%F J:Rm)ʽJ"?6ٶgGb-#Ӌ4Xb("pfj؉8'k'=ӥ>9/#/ێogmO2|T&@xu9d]{enZ$ 4Z*S5c%PTfdvyYAwd/rd18噹@vvx+cg0n6Ҕo]񗗔5P&  R$zM/ .Vᝍ*:5yEFh\%bK,!O^| )xGuj.4RyI~`j"IG~ 8:L"S{۝Hs׻5UH)4-ITbPQRgU1c<A5f\2aNc,XVS @5Ejc5^L9m93W.9AaNmhǔ:*;b$ 5ވnNIv}N~¦ !&WbNZHP4t_(X0֐|贩.LHJk Ƴk˔m#2>})Z髓5p"iVï>ޠδ>1NdN~诟Rj ?==;i _̎?]_7x|b|o꧳1eɥHU&85H6%Z|Ki!^%tvtǐOOͦ..N0ͷV}f8\N?-^߽x/765:'n;`9X4Fp]O|VWyyR.^cG4vN/vVOK_{tuǬ V95__l S)1Nk=j{tMaZ;Gp:xӳmZ̋Y#QAhUn#'r={ Ny eע:`c7;X]2]ܸ{QȒ2jSОJK2A8QͩƒKKVvxYa}Μ6;?v푷Fo[w~oRTپ;1jTZ$"CEEE+g;vZ7Zt/uݽAvbъm.EC1YLj3'=֊ɖz6qf] 0J Z>R}{:BRG`B0JHk%x%p< bίϯД]Eܑt[|0#a22p9E-9+kpɍbQi?\aD7v&=؇|kyϋoeH'@/gwQoAusF{bN*cWJ`ɂXqXqߖԽ`SHe"6$9LmV*>I 'U#LLkJ֦q={ovκ .5-xvrS@rƗw3?zyLJVAf͹wuAU%xJ}=RDkkaoU(hYUAK #v 4U 1;#xq/_5knP4ׇ89\K9Wr6  DT)j@E!h"SPHvd7^$f^ 摞MK[uɋ%*}H͑v3x.LK=3l |kMCOQۜ, Iɑꍶ.2q.dy:,$׋BY?ݑ?uZ+i@l)2(yk41H<|&yRyy/[2PxB @O?Puj1q,f]0E7ֶvܜ}k;WuxҘL{Wr1[W"9Xf1  f+b+}fjv$>wܜi_L>Mk_!yorAvEn*~>$Bx7Em%! W֐WQTdl#'fV ;H;:Xi8d.ϐ2ߩd-4/ @{f?˳1wq:KMm9)T4Ki+Y{4$V!_O3}κyLu?‹+h,+k`T/QhU5; nJ@@12Ur* 3 b&:.*HY򑓒[ ^C0j~(k鵱ū#̗vhV/Nnls/~Z>'\~}ӆ)!} 5 ?b܎;-^2iMN.iݠL,Cb:#qk;kp==6D@Qܫ')b8}(j kj&>`[a~lHTz Fd#M+<7j ˎ%=qq=% _`) (de2Q}J\تGpR= (`amǖ,=6+CZc, &mw&ZQ6!6QN>En@ ";x龝_> j3m6i'GZlPĔ FĿ"e?s|Wd1"[lddYn =?oWti2緳XA%hnrqACnnE+`uϫMK>Za_Wo&Mbb6{Gtr6_!bǟ_V=H a{q9yIϓ'q6E:y1oGh/i蜿{5ihRgQ{TOb[;m Qo\D#{ L2E39[؏mJi-J#Q}r- ځ- |8l SS^kBf3&J&$vf|ofsb] t61 ;[|M*qP;^Ř-FUٱ6>oB3؉F;5%lS2L? GYJȱ)%`AoX;i!aE&s)֞ '=J(N9EbL(y6'\TTV9X7F&ad53Ri"ӱp#TloJ6k<|5?PC,6Dk} d|1)b&|h?yx]|pⅯ#^&5|@~Mbpu'A̖*OkÝhS@ -B BT#g_[Vz~}6;?@|cj8H{Mn_JLfhkkn :,iTlHcSV{t-?6a!tIg).g`d[$L(H+*OϷ}KuL"un~ɤDE DB낤8$E+{L')bbg3[2dS \4I0`m[lfٔ:)u!A6GZ&AvGkk]4|h).UP$Oi LYdj r`c LP>Ku{G0L.8R:$$̚sx E.H5 :νw ,ٽ*S$P,cDp[,䫇1muԑއ첉x Y:{jwlg&FS{3_*ff]tf2Os?b@+BY->uxIxC5BX5) A{uo,=օ:#(huC^51>/8^$Ref`MSt<9UW)zY9FǬk_\G }Pl4]WTuMw|0GժcO)$i 19*,)cl* Ro5ƾ t"/Q}!ppByrV(^#Rik% ( Dv>o7kDtdqΚC=~ Q-$ss^L{a4xSjw)K=<2VƢ%V>d| mkߺ&~F:j챶vdLYq9&.lCiJ*WceI 3C̚B1 ZUբZM9k9;DdX>yԊY' oc\s`&7NOzG(_e.#4q!;VմGQ$&%H "4D(mlC }@8d`rB`bt[,ϣ+KC  nQCLc,b(':<9^ypb$t Nwʳ£ 4qW!=t"5wһbk+ˎ\9Ņo&K$}qjBQq Ca^PsVWh.>&+ETq0&&:n/R&%6Z!uwݶʧ< I֙$ٸ&1mg6LఖKEkWny9OE#JnnC1Ifl];ϼS$EK >磔R#,iȄB&ֻB)KZRا2)*oɴk, ._ڄ]r[2v7*&DLX~Ǜ__,/<ϧ-WROaђk]uޟV̔(qWWil?i fT)XQۋ󴔰WVN._rFǜv஝<}~_R|/AS?_r+R2K͸bBBbNAVN磛b<-{?V{wǕ.6~}C=[YO :fXy_{r.'ădd MAAuV+U|V§$Q^9vϹ\L+bro\)rR<^kx O+BDJТD2NR7BبJ]\Um&SB;.s5RxaƏSee~2H3%+ٖwOnmR6]|#<|vܫٌ sa}-PTO>b_ߝlHi63:b+<:2 D?VnC؄1s7h (,n_tA|=I_( 9ɻq _*gAyb:߾Eȓr;ZOZOGM1bBԌ5ʹ͋5pIl1Zuuǿs|SbR)5y-P}Vm,1;xBGqAZn ؀NA CQbT:U;}{c8Ab2/e5˳or[0!L2sd6;7Nq B4--wȣyit3γт|C;^FwϦstA֯xٻ 6s}O@qAnŒt C;hjAhr>'aNV:Kj ɜY^8zX0 m Z8|=pp$fc $z`Dл 7@ zw(Kx)8Dh\8l Xi|CEBA*JI7 sν-=ceUKr%(`Y +aA97+"Yy*b2m8J0~ru9ۭ1?|۞M?>Gf=U%`eM.1se \qhxh=%yYc\F! ST.km$GJ`v6, %{Y,pƀJؒ%[ln?nbWEh<}me0DAIĺ$ze?ob |Vxuc`tMa0xC6ׄR&PE%XRlѸdviIP BC:Mp5v;64Ng\ yqz͖FlP9hyΑ%FTP &}|6H1[PW-AA+jT59TqʔIfTHeQߢKT<к2/.27 KvAHB'ȓS$5%J18zo0ixUU^DgLʘf[Q_z{L>4 J*a.#\(q1+hMTPqPgQWK9搑bLY`r,)Ys(HcP9Aչd5 8 vrtpZ ˈ_O\=`b_Ϧ*o0,rw]pgEmꮣp;0=9fu㙘Tz ;`Ȯa8Sg.)~颽Dk_V$:%첫⧣<'Hc9" %p@ftޝ5Bcb3|S扞R5=i"kS7$AT ϰdwcbl'kgxm-Wpm]#+ xi]YBP@!ki Ho׫8YtthN rPo!y$* %,i&&7;7u0e31%7^pLJ&ys7yXsȀ@q+fEV|π7 oXebp`yneZ:2 B b [%EQ76*N@QBځyAIN]- {nhYp4F8!4X'r0Z2KFEUםA'`IkYK'4(AAbڧ)Kxz[ue4!l|}1ۊmEog#JI`) i'A  | 6BR8@WD$d#R,UA@HCYk-xtȵ FMVِ6[unWk[ɡ7,LX[]X1?msFiE^ȏJO6Aе$ULdBIACu 2GrF rF+:N[|΀8qH/tr{F{@@pъV9mZtsW l:7O7O7O79 JL BQRDNh[1qSt2Ge{>! 
Q:Q[ΒKCDel4> E@!e55Bz{Smn`fkLl*l Q&dLt싶Ecm]i ?EFb1M㯯>b Z:UZSAJ.a,C/ Db+zl1=[/_'&7Y<N;xz5Wgoxs.BC[[ 97SĬPhdH(diďRPx&@ _*įP璼U}:=1zMhIJnM:Vl)kv_,;`z8qwܟ6ٍG1L|btF7mjޝV4邧쟾L?ttvoIy߀P.m_98L1MG[d47:%&ͨfw:)W`z'i#m|4ƌOGX>7oBח 7Ls Nk@QSoz@Azg? &:h$02A s+^u⩹%Jl+t $&HAeߠD^C^ Pn:Y>s m3qpf-)s6u*~9F57ٯ:?fBHWBȪ !7ҝ#.(=q(5d|QfF!9+ Ch !,v݇q}n$~x^S)$b2t8/dTd`$d(x]bͺMng\V1c Pwvp `BHIk%ȩ %<[ _V-﭂/;RbIq'$=LكMEHkmFPd/DC6/aP/R8k\Â9s}zvH?SÀG`XPJ_1-d1dbuih<9c;pےn'vHQK @V_ҥWxN;J2`IY#-ciDF+X@ =6f&3@˒Ѓ;$Q[-GڢKHCn3XբV:8qK"(xcU^/% Pȭ(dUm.h#FUN<[$l&>hcbpu6HRtl2: >AQhٰ:!!f j6yէW\QEc1*0*,t u U8Ujtt5y(٠v/|?[׹{{{j=|^*v!Ҋ9sʶfhϦ,Dtr~ù~FQM*;)YʐL*勉B[@1'뢈L2j kcv%1;TL/PBC%s,4 *MAW>T/7̺4Q4zz&M^biRyD K}2&OAu5I$J%[FZY GX*K"y6hyE28!%5g:+)OAAJTʥD fiK!$MLAhh<=/I:$D*Y8u bP tE(d-8BHP :ԋUwq{<:8XID0PJe‹JaХ}LQG2瀎DӿĈ+ƹ(10VQե @PJz][8=Pݩr?i) ;?rCk0z\u1>㕣ߵvzl#knJO&M7Itz4KCܝ]vȮ[7oosAW<䛭uM2l'?.\7 Uj7Ka: Ca:c?M;FJ,.֕@ߡ~yYnXg9o&^n77ۍ}QnN"{"+FIG[ WZ_94ij~ ;h=`l ,CZ0&jqs06Oۺ>z~viR_֮U4XOPcբ:MjuVe־&2+A.Ć̣ŒH1VLXesKT6l§[6~SO SVfZgf7 ]ҽ6:N/ WBݛ-TQP˺<_Ҙ -I;Ym& L|A}yV:)ۀ ErЭl,9Tneyzp㤤8NL$]fe;"R(PH z=i8OE`.[Ì"lOApB0N ES<jaXئdNϟijbq= R xޒo3-sr=l7gk gu@M4]D<|re3x=/}| db7oqZ{GړUIsY3IҜv]Q ى9YjXtP%tL-vO]zFɐ`EsAWieJ/aMn6oJۡY?ѓJ>yooҠip$pu?j>F0VU;mN[[;~fe-BNyLi,cMEcp9AmpǕ!#%VH_,C<,,^< )5k0|1z&JԖ@/+xPP~E!ֆ9S!(NhDGu7-v9w/mOL Qsf>%C Q #pPlBbƎQPY-h1 zqŖm:x=⡣HJ@S+@[ڢ' u}j Jy(W,X& Ԟ=M{;cUҖӒ6חdHT1VIoKХ=شVydlex'm)OI I|z/B.!O]̡1e&.[E_J7;u5EE%,f?rJPT:NUX2B Tbo1T9Waڵ}>}i'/XD~ ?PU}c~-Rv v"/8wEXdROc1 ٧~:ۄIZExy]:R*ke~&8.`^R#K#=T)}Yؓ^netۅ '+f8/qUg˦v7sOLZyK\^'Hfl3WRzbfǧC_Rܒjr %oZ"-+"_Z pNg`/AA9yy景Ӂh-H$龜cMqmVn^HtU edsss}eZ T[*Ejn4fMHiD/=j攢Bͬjon`6^g{].P $o)o~Zbh>.]RJ (U#=Cly9885f) Ykֹ5XbO=$Zy6&i%b+_r'm0rLZ"%jilD:(V{ՊsxIF.E}  h[J>nT,|ÕpoVM \]8g!YD}%ݏ&ݴW7cv7-ۅRvF%''^U 4ՙuVbe`"jd- `mpn*b#(R13`L 8%@6ԇ%糋ecY~ Tڎ.Y\g.d>~(shSJo~`#Ч?l>k&%F׏Δ8hȞ=z_R=v.$FsG{s?jM>g/tɤa}ߍŷ[m49__7.0$wc?nRh^i#{z,${qur.tk.ls֮v8yį7T~>}u&t9;|lg̷!yo&n[+[;5nX7he]ϥtRUtpm .fS EMܑSo9Zflf(@ĉмKgOF[ om ӹGIՆxLj6:j3٧Si,>wT :"밗.+lq$W~Iv<|<ٛo8sIOKKW<9f.єnМznKiXiODKva i'46i CkHr{  s:DCR##G7 mBGߴLϳǖ#GS'c'.c*(%L8 zt=䲆&yE@ Cy!aJrވ{$R \f3{D(tG ;[[XA[+9uA掩">U 4TGq+MfĺrU=MOѲ퐊QeHf^kr +i@ ؄5fDa5f(N\[WY$oӝ-`ЖLaqG'Z>7T>!宅R+!u\je= 6 AP6" "<3EITRI܈b"jRceUNV?)g 0+nR/ qp<_jڵtf;pt*}D hBQģr=#_S珲[*G ~T y%K wʩ4fb_hqb\U ?l1tYl8[zBȵb_[FpbswocOA⦌w͑u;>i;7r`K~c٬ׅ>]GVC"ډ| FdnsND@^6$jTEx4)G~lvj7]+MZdKp%{ZNN*[9nW/]gu_KD bP\"0jDy@ |eHWK"5g$zزJ%)>V gN𵓶Q EH~.=@nfnxg-zVds-O5.ϔn\{6DpM;޻Fu;˺Q \s+UfbJ=\B-s|1Z.T:$֊9kŜKc"B# JA)R(aY;TmϾ7_֘L4HQ5ӥRNMB|nz 1'5-*r)2r.L,i'_>n:ҳ'|PWߓOF,:"+^9նTaol@AX`yY:(D"@9)TyfVKlyTbS$2bs5ˁ@aRs.=x=lQoڽAh 6lq+l|@nRVP B>bE I)xP*lv/پa'TF--v9%7Imt^]U`Yb0J)Pڸ<L^bͦ/(-rcLaWz :+yKd欷ޝOH|-kG PdsC*e}>׌&\G貳){k:#qgZIrLڮ`\&`F=XnlI Ւ%Kln?ȭ"d;AV(-טZծ.q{q{ЍNxٖb6r98 x0 ّ92HQC ڊ[kd!{ T&] Vzm 8h06/-HIkxINӑ$FY rDP)7>rc>.i v-Y0B9Me[x2X2҈D3rŢr@Kw_w7 eN[,xjz&x`*Xr(F[8!* Xݗf k7}fByQrf6lE5+`DW^ _ ߴ;/ C !ʦxM!;Q<8>G˅ IЄ|sC<C(C-QKRMTgXaM ɍ|fcbGJjduǴ+J0='_(L{:7MfnKMg}FkQ&Y ~eWOeUvwemu/ _?" ]?AZrkyS {֯4Gкyoˣmc9,z2+gI;㧭oN29 6 .ŘUBxG#R:PQGˬ'(L2` +0VWd,ӫ;8SF #+s q,Kw:z_1rW<ΏEAU1H%"eZ׆g [g_[ғ6BIymUq$jLy_Mv4^>>ڢv=VE#ZJ |Sq}Wն.٢"fF1u[Igz2s1{LxL'7\y #' F J!sf]b8ڂd㑘c^F?\; m\'P[L,^Y  o[twl $t}ќ\w;6rEgw}ST /N`6E᧷ OqoƄ#cdFkMH;e",,QE 00f۽"=ϽRWOd+zW`G]&os[w2. 
ǔk!Ic=~L{ UDgj7YndT& Bhu2F$Ja<tV[$N$h;/8crܖGdt"8rtF%u'51d*l̗s6X/\UR}8,Q?*q\yᆱQUÇ3qa؎GxEmU=>Nc&X/gwXOJD(]3V>xImZYSWܞ%y "b Mk8l!x N'Tx͍.f@DeZQR5_bF1latD%^hBhp8c>?ڽ_`& __< Z$ ؞SJNA^wWg7A+T%Evs OOgl6TgdhcTJ Ir*::<*<1$=g/̂~{\,C4q6@=q3Ƙ̹&VDd򠑦$+e>MհH/w\h:w/aNe<~-&Q7g']2h,֨hc%H0YFHy7$.#Q,,iZIFe$4'DÔꑐ7mNQAxۇ <5&{AtN=^zH}NAOG˲gUTI/MVax7%ymɎy; ]c7WHzңwDRA=y!0{0uX_;?[XF.2~$ryij>(8XC>9,oسXYzB,s4+U=$:QZk c0r& }4ShRJ d L>$C [igdΝugIOF.[Jo,xdb&224:^Ufڛ2=G= GM}Vb &EKdh:IJNU̝2$9yR=g2 "ihh<2[&"b*9cuXunF~E}8H"{G8Z2|$B##ͦ'~J:DCAph2=I<ĜUya 6:~`QNCP'tQ|VmV4'6ѭd]fSbjEmjvXڰ6V?pmmn۷[irBؽHvCE9T7Ovjy_]yخ+Gmlo!7X`P){t EWH1x41mr>z1 Ԕ\t3|҆#ݙ)ݑLKsȌbA'Z+16[amjc2lIi}Smm]>/dA?θ~$gLo{ʏ~ qVofB(sEl˾<^ښiIY>q6 m@s9.}Qd@j*sy!oO6ܛuF'ˍPf;Ӗ $;1:gBRDU,gn(22YN|8+;Pbsː 3{^N!$c`hR\ ,1 1qzڛ\qjufjvT\a=q%lq#4|@CGV *a/ i 8uqisR Q%keS#};I\e7u sFxc)"[Ml(Rc*)B&N\+DMևWveӣ.Qe= )'-Ggg`\?MwҐ!Eou bt4?VfB(F_;Nki[L-n鰅 j:rNoL2Yb8]gY:^)u f$A{tdoT{ E O6er4L FC0^EŵRIG+#<+ =yb !%ƪlem%CD_ڜus@zQ 5 z(I |=yo8&oxͨ(e4M:QAzQ3t@dL>f*bj:C57 c\0;t1DBe|;>w}=!Z$`5d[΅t=ނcg)ù3ʼed_uI^bfcR* Xޕ,DZ+_Ҏx1${!/y^ +k>݉jjB"dwP9p\L3hrav:)uHSُ=]PYٗ@V ޕ`b-&4ߪ\<4%ӛw.߮,?⊹ֲ=/,,SMЗ#`͘{)+=zY8%`o}jz%gm:]>Ҏ>ŽCkEѝ;_dɋޯUOtW7 ت]kJ_|/w70w$+|F<ȧB)r)]ZO];S8'ʋ<0Tc.kB.M߽~2W:ֹXVN>_06V.Hm L=bjgm&'۫\}>6kM&+%Eq%&?ޝӟª\(=qCk/ʇzfBzpX Kn߬ :aqE{)\٠m9+JC*̺d^\Lهtʮu-\ϳ=e\rkx+\?Ri[0FCc+.M91NmUcWuFh.4LnRW~l¨.#_q)$d1kSsiM;)\BӢ%NȩHp+jLc*ANSx$ WJ$9&AzcKDk3I|8o\XmLOJF5$ 3I|yMB;c *dL}.D[Y(QnA9B6DȶqLQIxy2bj}ΖE e1AYrZOH"C"#?a{ZTCȋmQa"^ 3ôִjք,ODKiTcpr!]sy1(ˌIՂ?E;I{#֎lMoyU”&5\:P )Ia8FPj,^$a {4,-QD.F %,x8/j˶-6OC &͸y3ZD:\H(b`ŗ;fj1^J֪dLU! "0yOa*O"K@^*$l7^C2*q;u$o3f90 sc2X53P8TSh6kDu͗]4á >6o&[!#ҧhb.rRIp Vd!@y3S uq'W?7/m ݹ8;w`_P1B5Cs+ b,j3b f +مDYPkvF`h2лCM%2ɢsl%TԚbG=z,h9i9/xzlz8ZJweGހ}/1ͩE3䬒[R&Pc$< σ{j'3 H>;F /~RIA$ʖ͵"x ӌ+v^W Mr~v|zyWֵ]) LsVVU6WZpHH[4 5L2\8Dل#|[=*.?ͻ=쐳+1*YsY[Cڳjtʕ) f@H4<>2{tQ 7~Eq|!xC/NAdh6U\*X:UUkK=C6$[lt+=OL=9 7^ KXwP[l~YV [0f 9 )gk6p:kS)noYUN6(?w$MLL ][PцKYa Ö)yA3{oY5qG?{ycKhREr.t]ikgMh4 X6riѷM)m`Dաq&dEkԌ1!W?k#V0Z5c=gX'LI $gh1v^֌ Lq]EE7{u# ֍ݠL9fj=$&ZPB9s"T3cEpN]qp{"9ʓH6 >wښ@Fbpan~Tɓh<$ئhcKC&~]CmXg}FJb¨sgڈDdF*k5d51J:ޫafO/rKRs9jC(OTN/҇_Ωrjo#\nUQDVŬujQtjO/1G D'|x/O\✶r{#XtM;6RBJ2^w .V's>賶E`/#NE8,۵j0v;/1CJڣ}@WanahAsK=13RP߿^S$U.@~<ޞ*d܌f]4FFk:E-b j:t|e ƎU#_2# !- u6ewo:=~((-fH%&RńU^Ĉ3zJ&/\sfϧ꼟8xM?6`nP>-A8<$ӗTX:k|"r# FցNjnOsP?W"箭:{FG$}l.*Fldn,@7vv2#3׮ZҲWR3FjVn+N?]fioMj_H P8>Dak(HQ+*t ~\.|HM yJ>/>W3}o%FxX̦l+{=,@.֩0RkUg<h&5xƁ3+ի9thCnL1դMDE},qC$Ntwu|=svy"ɭq#5a׌7I&g&'^WEgGuGe*Q[l9N|3i{4hgc6'vׇ|ޖhyjHjF.B.4Fᣟ _ALȒaŨhlSQl \xso|$fGbWbBqЋTjU0g7 nI~|6NBaJ0z]x!+r&qr1U/f 3!jՉ6APb5:m%FsNA1pW+ylZyu,D9ߓˣ0n?rEpr& 8*yعk˄AejGh sAS rA3 y?@=n1PшLJ!hT1Qb0e3hcI\~}زZJ#uKbE~䃖 Q mJUIɔuY&le{o]5(j:k);`Gǚ2MyUN8K#fi}wO.UHV^xY(tΆ!@y a:] L)\NUґa1ͱX3B.壕TKb4,}6|bk 5 (zlHlaM̀22=*Ӈې)"XF>NkAȔ Q!ap\bq[Dꄃn=Qs %֦ɢ+v*0溥ԕj&pGkcSu|x H94Ģŷee5@꬏3>Z7E5ÈTxKֽҺu:ᖾv 8%{R"ISch+ioN:%Dްӡa$,JlpJkw 6ݨ~qcF+w:A@3N(mB#MB&(h-*1F$3/;taK$`)a(H.HVWoSɐ-$c8$kʙglɨXvJђ;+:Y?4zyi0ʑ0q} @Zwqo Yụ̆??Er9jpܬ%G)Yj%k\QGĨvRrK >@iĝ_K˴1[m4#tn-L7jď&M.מCNǩL*%iGk,+HcHd$ADAƊNT@ʙ(Q~M'' M 9 @N`$iB!ՕŐ-'QnS9ZTR QP@6YȉWJe y)SPPCca8tG`䅨4c"o٠DT`v\:X+yWw ]& ҆! 
# 6lNl3ꚳ?9xosRsl)GM#:vk1L=}28RDiOg{m{qy[q_^ !a(g?@H.hV}Pn5%BzJ:ؔctP)$mU CPd+ɔJUتٔ[% Dٙ{Ky9oOxNl_OT=AgdN?Xm5]>jw TeLD-&'a'V iZ~LM)j)hBBY{4I- Z >aRV^h: M&yg:fG_z͑ЍӆQ͞xz^aT%'+`QVVkCE ̡`Q'&ל\Q9d/XvkE fbelͲ hjm Q@&5tЇRu]RVR.4H=G z[;)H`l铲Ey$Iڷ| 3EwG*zvK(HViLy 9:]ϾSpf}О]9 uqƪGo ~}>¥juQ\Ͻ)|TȮzTa>#l Suvl=t PUNaqEG]uU 2\ 諊Qeك!Rs7Lk";2 {ӀUU{f93VA K괈E㜁`pS@$]'..ƒSFEX27k2YP\TII@cH늷y{o2AZNY*aR') **,[ Q60FPj-O{'ޑ"}2E[(PJX]#U5IVD˘(fѭ-66xU$ f_|~E=n(?|<߬l?ܰf_'7r2N -__[߿45[?]}|\c.l`&ձ6Rm?miص-v_K~tXo~S,S #x65(~agŷw:L c5.栦3g^M[1˸Yb?hS_%_*)Fj1_)_C!fhuwWMgguF܌?cku??5%:PFHvd[D@`SQKĒܷ)op]w'jMiZ!ۆ9Ζ#>DJ &]yˣɊ7a(F&h7!{n)h+.p/_56omrq*]'-D/'AصM ; [ӠJ}^n;\boo͛m"ъO&j ufe1?j=vBtLɺ8. c9 we !b07[er媍>oyVߕ'j =^{dMW|Ѹx&?4U2+8krw҂;j'`7ɚlsF@w/舑7Sf#=`m!0;g`|;*;H-sfS~i]Kr]ٻW!Ϲ7ww鷵@(q6Q<4FMD}u3DCw|OiB(h;ά8BFroذ9;FwL*q9e ]> eMʳ5fZLݧ&i}۟DKPx^` p{M۩FG\1~{Bm8<o h&pPVc p:Ff2sQ}z\Djs //t q-hlYC吶)mLVDŧuMQ0r`oVC?>=̷<571r7&1y/;x,_>?]#◶ț7{gʎ׊7l)"x$196>XzzU<ƯO-UZzPEX]0"HT‡DҒ&n[5p\e4"tH㘔 E(7H.5bsK^,5̂jւ™jw웫^7_D~RVK氠*0U) ,TyU6v†;;쭦f%YO#ZzZCd5oBЕ:/ƌnWnaO}/{TOgVIz6̡ mXڤy.;rʺ^)G:U&`0#:5]m(}{Y';DX51W{Ƅ&֮g~op6&{eE]Pxa#:*mZРuޟ_ykEEKF5FQJ24q`pj6ka%ӔNR}ߚ1EژvZg`@Tɐ2b爴j"2 Ҧ;۩-;51٪aC71ç<βv]Hd$ADbNJNgຍk `–CgAϻ_F){êZ\$83`b ,J2zfF2^ SrEE]emc谵{?tqf#_OF2"B;T6?wt YgLBz*.Z)! _tԤC|^kaWYbBhEGec&Vkq{~vC47j-m)Zy`%ka`DmbRS7ݠcqw^% NHfcAQe ):Vεmj4s/ޗgl{W>g΢!w /" Y~Ŗde[nv"YE+,",{ł-9gTFgzPmeOQP`Rq6UI֑:"E@*)4r8DӀ֊ B:1eI5(P\~3B&о%!z+` r L` IK/ M=OP+c!NMrR&TLI4gF {ӵA,NCXk1E2$ekƗK(e[rQqS R]KQn]Z7$9{T#$tPޗHe'Ֆu2 ]Rw"ךGɼP99^[ ,v/?<*eR*%h#z`C*eS+SI:O8$OjXgpzSFZedeLȆ* WS?9a +j_0^ҋ&=Zɵ 1AuKl6:B]ԫDRR6l`іBg:ȼ %gsO!}ooF$etNrV)KzRe~1{%KR~+c(XĈ):^mQU#G"Hiem u_.Zɸ3 N. t2hm7:օbbFiCJyi%e*Ǹvuv9O6ϻOnGإW=?gW^^_n.<$͖Q줿wgM] J2} {=/4 9m;ඃ;W󳿯RlԬx'~r_ު݅IZ3;Kw<]hۻ {L%=k&v ٝ~@o.4{1ϓ:G!y)gŅ6њlyATt.gq>|^vi|飴,rYBe4PWirh?\?WN1]^aٗ<>y xwvmzɝv<&g?W_ƫ⯮fپqXQU[L ;_:fց w]|¿;>֒9CQduMvH_BI-](D1 P.,g}={s63w~y]G7+mbcO[`{wCW;iL 23lY?=?5mhbR'\/~bV,Ւ02}aJͷЯ[o [ L)`x^hy1 GC)ޭռ{e{"8<[?k]b*1e.@oUNEZ!մi4d1\k9zU5 LL:,!hK$ ã.ZtJ?N,u !J#VieޒJɧim@]LʰIv()@4u*qka-Di Q^in؝g#0-^Y`8ޥE1^1MN'v:څgo4k }Y ~}S.l)8MYdJPD:ApYY^gzQ|8"5g.)w;!;Yk2~;k0Oܳ%H+8)4JNFIC.$4yw&IzSvfS_M3^CCzNfcbQ @AsV~bBolsٷ-A0Q( 5HuA@k1J^}Z\[Nq"x݇l1 ZP4OvѠdc$c )WҪK4& ˃m6{#ҎPvP Y,48[X-fCxkYVzG]ϣf5,X4 =ǓAfc3O%F(bd#2| /^Kv JNJvӲe)^L\XȝX,4h(Bke~,.s"ǺTfOر{^(5@^6(D1 Dju-αIxTX\hdsdYN'ckIlZBM.Ƙ贴*sC&@1BP29" Cj}Um; :(~|r\pE T Le2ZVL{~9$f @^\8G h/6)mLE^$TOz ɅT Oل;s`zY~/P@ᔶʡ<NFW{CR-ApLRp6; Őϕ}*vh{jΗW-~2٢/*gC@s.g?V"qZj4(.Ȕ*d%L+{|3H5x* 19yPh%iW-C E-RmRERQ՚i GnmMAZ7{wH=+]oDžQmm0m$yMap=e}PP 7D/*ĠddUbNySDK/+P8z,4*Oܓ^ߏC,L.:fCfJU9sP&Be%Eގ`A⭟fpҖ- 4c-m>0> )Ph:s7>f\_60RBfTJ{."2ꝑ &({HIκ>&lj_O*ʗw࠯u6+\Xt&ڤu0Vv~IfFn52o.Ҏ%.\Sdz[-rOw|-mY%ftimgYw^jy6hF_ݴf,M/Tm4]%ji>Vg>n՝j!1}[`ݶ$Eδ`#[|}"4Uo#[by&ˣπkiCZkL"&W8%JU 2Yp Qy zi7k{sj7dvs0@o藧/~/,$M˜ѠR(3ub8g&tj|Lq KD ;7Ōu;z Jh9\s s}lMF 6kZvzHכWqW(0uXZ"⿦暬YqL,eSjJK4? 
*uăn( RB)`2xbWw&N+Z_lJEmwؠls c59XA:q?m :c"ES`fZlSނNsiup$"9W%FYupWt Ehj]^ޝ^%`[z fۤDL]x$I 0gF QiPJ.r^PSse:^<E% ):1ƨQu{^}]GԵ!f'1EbqqIO.٦p'$kM.AǁC[cCWZ_3KVɃ2^MR@;~~ػ޶$W΍6 0`cwe󡟱6H`&)yZTbs.~/Ox/5 _5=V?pt뫞NCgW/xPrTGa>(JPXȬ7 yF]e-%_˖ {a\#sxݚmu.%U\f#.}W^S<~=d_, *ʛ=efU}_ּZB䉇Lv_1hrVhc[9ui^WiRb(BlΪjme럼>t??CR+z}[pY ERzSBiU[Hz&]JT@"ɹ(Yd 啔@/6k7T)+/L4j.Mq`e*'e0-%ݾr-B:i;w@gEKچ$Ə@*G@Tb6֑lzDk+y%UTBʦV7 10Z)[V$Ԛ >c DvQ< Gp kfR|i7Q$ʓ`ʐ*+σhHt#7U\K E80adp?6'dHHՌLK-]L4Uq6Z䅺wF8O-3K)a M H#ٚڽs#FHW Ev٘vt%l֔LsMYDr^Zo+j!6(|ݳ!ܒR{#eDm\!e׺H M!-52j6Z2*Hgܞ~=G9QXB)k-"0+z T#TP 4%UCP.Yvvhӗ2 \TO6l89 Y >2Ivn9q7 @ݵ2 RӐ6slZ ENdmd+^.S,#_4n%D9 ֢Б-.` NSՑ@MўAB ڜZ/Y55'$*p?U2ZV *x: F{FPkª3-j8#f5RjP8B3X17wFwzfUU G VUBQ$5Lk҄ | :41;E&wP-(Z,@1V <&@*J's6@ 2negk%ϟݡk@m5ňX,լU%Ip)M5ULe9"*42X[ Pڔk$=kPj2tZDŠu*5XC0BUfHX[(N!OeM 4 DШ*5k h=,x,X:jIIQH.)+zTB- E҄6[>Yw ˇGk}tX\AP{" lDGJ #d XQ kKJ,^Vp0MYP R A, {C*u,\ D+~Px(K(I=RD,&ƈwOru*}1l3_# Ԥe@pyjSBGrKױ|ulO^E!@3Hdhe+HJ,rM*f [vzCBJ/ hQO815hED|EHT2ı]XÔC+,%VoEt,OKp+0 l kd !hhYB3퐜;;7 ܀i*mNZj UC` wK l`mT \ųh,R A|nb<d7hUf S16ZU&%@{P|<9 |Q5ƉRC0!HQj[ ĠAyz1`Vޡ;iT|Vs?Z]wªkYc:UQpf;Y2yzC JA.-KB"I$DZieKã͗.R2xH݁I!7v-s5h\Կ @mu%,փ7=qMB6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a㚧{ 6`ב?A8 p@3![gAH pAۘ_cύ>ri{䉔vhMX59aT+KÿiLZ-Wt ,ݿAD>ӳ"<\?a6b,Rm}q)`ӂ wIl_Dɲ}}}}}}}}}}}}}}}}}}}}}}}}<} Xd_ƾMx el_g/ax|,X:,auXbX:,auXbX:,auXbX:,auXbX:,auXbX:,auX2wԴ xCúO@%uX WRxZֽ~ֽD?tX:,:(a#{D.Ϗ~-ْO@4 !d'{ÍL0e_rc<8ȿ6%[ߌN|nӣHk)6 )5+'[p\#sm\\P~N7h硙OQ;N.H;R|rJk<:xſ>=KdGUΞAI[,:H[]Vi4Y4"#a?_άvf3YjgV;ڙάvf3YjgV;ڙάvf3YjgV;ڙάvf3YjgV;ڙάvf3YjgV;ڙάv!l0)ls-,|ܷoh7 k9çOslzX.yUo)w֊no}dKH(ͥ_QG?aɄOYcEǰ#*=Uφ;~Ň׋w=tx߹|?6o_}'*7˸O~:wJ2đkoㅚqiZN{K7ov^ŕeMY6TKMۇxqum{G-.ߍA^B7Njn~>oh/{ޡ 5vHE:ZQV[bXt20|u}Vm\팚t撗71V>sބOFk]⡕>ya _G k):rxO#`.O NLf"ai"5UM!WWTo'Gg \!Gs`w7gu@ݎs%3A- "v0ȲF\|Q_'KW }kGXqپSI&wnmH+~fK, 4X6J$ّK(ɺ%9G4'@`]SEWbUb>3\=WqGM$8h#g` c۔ ңʼn8#4h$<:1RNݸ/lof`04l5ˀ=juѢ@"mQѷ'hEUN*[ :8m?j% ͬn0,VǠU`{hRv\8i?j?qqZEʂB=aPbk!Lٙyeۨb.qh};i1Qӥps^qQTE$z[ˮԃ: u6›*~}ajq<qm?}G~*L% fO)4O67F[%wT9ЎA5OBo>/CԥIV/~>ݶ:?T}5=kPgM׳ˋmMJ'QnwukX>ޒRˢ~yZ 8Zb,`==re[$hF:mzojDs@6J wyyk-?`z9  ?~VK]N}t?N- & E9-zƵG((^^$kË#a֞GL)щY y 2Z>*+B+W] [jrz 4;_+eH-ZV.mei~&8Of/=hT"&&4No8/ä@ tj_lWۮEA2u*8u*9(}"ʑr:c^ާj{~b>K=@u-:<G7_bSI:,H̋Xp7 o Q۽`o5zߢZIgt_MXl=CsN+ۤ4Wk-6ΎQf`ouNǏAEFTAET ^[p|-x.+dU d%g?RlQz&ke 5?aXS[ct;M<ۥ1Zr賾x |㔟?>*mޯA)k>bܭGq"'V|k:u"7"9eV--x6,pg5TNeZ P\Xv5z>Hf-hlq0*~'EpZѴO n]V3h0/a<<瞠D#䊃b@|Y;aEɅn!(m21X.c;xq$}G6T;N+DP몯LZ o&R@ B' ݪt9Ig4dg4T-F.p:Xr<'=]{X,Kxz0q{tuCCԓt#T%$Le_d?ñV{j/稃/I/R O h/&)LHE'RTsRyirZd}H,S־?7R.+Za=VGӠUQcBw,oyd 9FxMGoWːp-7:پH̥<ѫ3}jQsQX'%ҵIv% 0䚩mQJ^(u{z,iGN tw^ #ˉMtdǰoI3ĠidR]0zб0D`Z#, }%%HЯ3OjY Q X>Lkyӷ\^b.c4޺JhI襉NrAf0:\\I⁁,CtBns]L݄'pmT촷>rZW^T,sek_$U*^1Z@l"KYG.9%u^+E3IF}>O)Wm)A*HA&b#qj](6DrM̧8k#k{ǃ\5֒kF1[B( QڤDŽw}? Z Q(Ӣ 1lAT!/O)!d[Zq" S*hiMH0iW)R)rׂ.YiTۍ z_nߛy47L:|2ZFA{-Xxrl1ؒ:sp`D/ co/$0TԂ Ec,]!R*ŢP =rn1LTdRzvyYD +NNZ2bf_2c]0=seQkYY+-9]Tr5 A EZ/`&9`)LQjoHJ ڱ㝭ǫ ]U sS`WSةmk1缱q"0vKnK ݫ/0[h>w>c`Jr'ǂREZ(Kۇ7m`c5Vn$;%s@zrXᬋƢ-[=w%Ч3=VQ׸o6oWJjee6fE(Y @X@+RL7_f/1BƒJ֜F7JthkIL]dA;bwlxrm~'K ɀ^l5:-vJ4y4%.G^IK!eD^ƏZ"GTlQ9u ]KT%H@AfE`hT+^7ҳ;moDA+G_ 3~q wϫQ~\gI!ui[^܄6^q~Dr :$Mwv,v pz@~FgqzM.L:脆Y)G7MDhkL P!}XI,-+ ]Ͽ= [!"Mc5@_l-->7xi΁[󫅥|Ç6(ma2.'ֻr?LKU!uEJ2_&t=yzǃXCpd=LVZcdX'WKksOlU'[;[|7MAa#l6Lsn58̫wwцJՅ=k6ssMgR̤'! 
5NiCΏJufc48iv5Dc,ߎ(uf1ih[PUyT.'2 J0,̀ pg\rݯWu+Vc  #l huP\Q&7Bl' uX ( %]D9CHrfl>9]K=+gU ouZ]EQ_ͰC?,[ңa d:$QsV)eFvVU1k*ɒ*)*Ԃ)oJ!Oxn.H0g C/]sYq%ǔil‰Gb Q֊kB`<\@ %yr!6\7otr>qurfR3sܞhx ے2/3of6lUoډӇ79kQ4qd(,X蠤8[+8  NB\ή?^g{m?&l-ΑzCI ҿV 19{Q^Z8=*rÍɈOȋsG}ɱɿ"l)э(/[ >bߖ߻zBSVhvA[a 2cwU mӣaG_cp&bg )!Ti5@O"&^rZTv )JY|eOfD` (2Bn-Q6 vͼ _",,afo5+4#h5B yEʰei=ɾ dzɧfAY.p>%_1%i+OO֪^"mHɳF"<#NQQ*+ Q, GfN#(U!ւ3g%6h ź)AR@g\bFv332lz_Ooǀqktp\9dhd=yhFUjTό|Pћ&I%msa12ȤTvNxS^`?e?OFgIї%bcZŸS,_?["WIޖf &՜+~ͭW1́MHӻO~{{}-T-rz<'p0Ij:<,p Պ/z-dYO22vd7'"7:>elN:/g4'!ꔞO >϶F N.~ 2㯗#j̀Z78|$K-lZ:q߫Q O= tI!!?0N2+/ek*˶6#JV:'F&0c-fcHGY>TY/߁?Z {V#|PWØGq-*jAU:OXmp-@D L%Tɱ rYdfNX 5*^ym/S"uxQc*17s!$fE ^+KQ-MG#tI[CH5LRU1V: DeuRkBqDDrugtM9B}\X:Lɗ%WxIeR\ʇ \ٜ48R@JLX%ؠ&SӄVK3mb=FAL D"j6Evahr}Nlu$Mi#v9[['p]au Z4=?mwl2b5ytْW<-GՃ[ r "k&`)p+0TTŒ"㚴@r}:LV/6|݃lp5nRfd4V箦 F|qCe"62gcuD2tsȡ$ X[XLhZ`h rZW!_$-H9i$LeQOC@ [v,Qo}Z9xk(CP]U@ovKN"c}ANzw1!%+Q%^8{ 8.;jgKf tIFJ&~țoFC㗙<6vu:yܐ̮u94y::S6X2Rf`N8ͬ'Ucs"l?b}:}Ǥ1Y="S]Ri7{\k'0֥EV*]-7&x .d+BDdC+gwW{}s[<7z=5yrxMVF `{l޿mh/z|oC}&ksmk{AщՕkB]C Y1#/G$]ך_B~,d z I YtthN drbHo sn{MQ_Mcr>  S`5%92uOI~dFfY 4k %tXs*v p Ad#d=*ȩ( D'-I-9{ݱ K[L,^Yx]5|{:_e7rxXբ̅)BVyO4ڬϵO>Ѫ>?Y+^V?{ؑy0v{uOg@փAuw$D$%x=ՇDqH %]C|]Uw],bQ@㒋S"&zg$P@LK#^PLQQ$d@jeLL$1}b$ej@C;1`o>h!b]&֗(]%㣷e1#)V!.8{+w,iOx6i9K3iS:iGWΨtڴ1l5ӚޡB[z?k謑t~?Ь7km~\ooH޼i|X~G)5KqZo(~.RʞĿ yeֆ .=`ƽoO_^^~oڋݷ;<˃ysBMkV:R>S[sx|A,ɓcT͍/DDi3.FS3D]+7}~_γ8NzGz/8U ƿr'$fCoYHEҩz'cxyOп.RbMo?ۂ!n XC|s7/ըn 6;1/&3sE֕KIڦnO'Xw6Uz˰I߫Vt'?SxDWN;MJKBi,Ia= >&ABb\Kz|IAfwP^k Q=nĶ}!QP-)B# P 9c bBY\ PpV߻e˩F@aS@~Ǣ񔣼7_jLqX/\ WϤ$")I:fPUƯx!cPr L'*bj |<;?}_;TN_;*ࡳ؋o=kZ% LXRV)`.-.Q[H`KN @GU>_f63Pg;ԃOץZoB"Y,AjF%ճdrvl UZkIKE;QPk <@X={v6&S{6 NP3e"9XhJz! fjo3c - zDUM}Z#l%rf CK>)1hc^c,IMW`YŒш(b./ >HN/BM(WSLp o + -HJʥD@`,6(#$^Md4/)ok}[4ޟ?wkm]Fn]k^Լoy/6/N)Nm/W7[[_vu8a:|V]xg]F uO m-?^֬wܷz[An۷[ۆQmi҈;5S]lMV!]UWi+ٮ+7J.7M= B14B^F>ּ7_*V51I 4Y3,7+L4s؟ pO>h֥|~362oVIu,f~blcMulnvʆE\ޡwk|%㷭b1Ѽ8 Bݓ2-z}޿+|.kOJչͿBjBk.ufg,rږ)=0TuΤMN'TjBnn{ӖF%7NJq?"h|` He#SdE`wr3}aBI P)AqY"NaS䵵*蒨Sa]rؾa^caYZ`y{ yWg39z:ճ!ώ}Bb]{YF6CDӆPI*FW&ًXb\M5L%{P(d)JMhPE#EC6;M:ӎ9bY26ypF8{:Td}z]lW6xaP2==g}e?\! ޵j9WJ;,%8` ӆiCv뮝U ♸S޹5Fr[HMn?@;FIC+ DP*5d6h+9׸[{|hR"B@lx*[,励>YU- 3LNSF3d 'َ?-Æ9v7,wUuyℷpm (TUو_OG-i)s4q* YNf_?]=^[z3x^8Yn70z:#YZZ\uus]yu[G.^ҢMwetnca?Zn^v{5Kej3So g{sRh{?ۣYoj> u`(٪ 5>[H.&ٳ)plwc;N'X–6/]Ilov˼RR` u\l(@X i-uQX%FJTA'3b "{Id^.yTJ0KMb}Qh]r;yoM^/ONDU p";$EoR^a5$k,R.4 7|bBxB Y‹^)(w tDRcEf+N1H]sA%|Qūxrdl @-Ctr*毝ΪRpvDR?^ᨄh[RuёPmЖEBn >O <3bԛHJ"_#)U$])]Hjv fn!bq0Fa^ɍj0}&7jo'-DJ6 %2VGQJҦi`alDp,M~`6x |B'ӗ=z}y{ܡr!IQ$G9to?jY"y#AYk\,PIz$ѴM//ew7VN/t$Nay<\<ՁF-N /鄖ɂߞa9_d >C0o0Q=7H tk6F[Ti數 6Q7}p'a:Y,RV))I@E3JbHdUbP{ 6Kb"uxG4g`#LG +#Rf4xm(ښʶ RRP3h@_)MބC@2PkEVPrF YtI%5W,U)=ɠETMrYvx'>4^`uM9LoyPͥ ŵj4@E^ۘch~6֊0dػ6)~Tuu-9 ~JZK,ʲwTSIzXd;pfHN^NLRJdukR7]KHnAg4L@\8fHߏ71bo<}c!ojpV8 &د1$@?TI+lgFlKf~O)R*a=+f^XW԰!  Ivz{iyg4d+]vdB,bAb. 
GxdZB?\OCP'vqdm rvZM %%&Kf0O}pearnEAGU9ff3x12SwMBMAo54+jc e/Zɣ)䜮>[@(rէ94o+h?ޚ^癡@}ݝ%,DƋ ~]r$&[PsV* 59m 5捄獄獄獄썄w23x#T ւ ˣmuvq;&I6 dS`T/jͥK=2-AT.f-߆g>Ɖ^Jv$~|[ fcޏ{Kzcm994j޸=㭏 :벏 lu`+9&κ=N Z CXgؾ3>3ֿ۪y||.xL\W"NkE'0*/ǃR?gMbi:GV̈́ W=s^3Ugw4Z>^447D+%gOg+LdJÖ,PHOFGӎS .j/_b<ym8EJ(I&VG-Zoŷs38q5d9ob-L$u61Q_ϥRUm5S#,1-  7WBмs3$_j9fǶL*6OCX&{vWy \75m29yє6c8唶UEJZ9#ؤZ]bo@BSAo-L=%Z*:-2U;C~M~ܫ̌QccN,)j:90L8Otx2L2^k=cڸӥĦh1 `Xr Qʆ{<>(6iD(ɓjlkdT]Id$@CO@\-ab#I7~"IS͘L:^~Zg̼cgc*>ThB*\Ro##I5t-x%]3$pJ5#'0 ΞbyZ3V`U[э㥺Fxn4DuŤYL$ Hѓr$\c8;Ktvڝ\$ *D{ r},[d ֍nEq֍OU8DC+!ǷmL&%/Ґɬq {Fa.S LA"M=$<'mD"EBK;UPG~֐]h7SIK5O&TW%ǖ@kɗbbH߽, KxmUz dGX ^$ '9~kOuz-`AקCrSh9E٧u k-i,3KbA ɻPfΡE`vAseVM &{ဘ!u }PyǔfVumsE4IDVxZu bEcqX}}ڼK2،κh )l6Z,ژh3TlqZ.K@אcW}]7ꉽKf6XamUHyb1 eV'-N)╏5*VEXZ1'i?@صHsNϐ߫(gG0"vB1Bu,2ҼeTؚ Deн*@FP ]7_FC1ҫ} !jkƬsKrb51{Ȉe-zN?Y+K m^+܋{itQѫ{{0CI?q@a~is(!yɻs7'L!agf<{dix|z%  ^QOm2z{}UOjdyn.\˓p0Dh1o7+bkZimo WDPTbW3uDFH@mwLB#@Co7uy>_Ұk6 %%Lhz;:x`g+H'a^}o|W;fwq}JvD}|9dzrc `;PN|R-#o_O߾ 8荨ʓ|6PUȻc{+eoL^]/0\ǹ7*?V.C*zo_NR2{WW7Խս+(xX^{{W&j{r~[Ѿ+S?N?uXp.țW ֔ݣf@zSwV'(/_9r۩e)d Fkw}++G`!aQ^ߜJD.E/cFBV./h5O_:Pu1뤷g>b_7[|pHiVժT9 ڑy=85w`/BġWh*zs 64΢1C6z\Y֭уJJ^\ŧ!@+Qw7s~ p$`e >?CNj^$WB_e=><&hzkB ZSC,!g8bg_$m/VA)h"D1}(!ve/`狕pc_j&7p,5Ɉd@jJłCƹbn(󢚘5 Bid1I.2Tz6y $W"}C=!x疗\XQ$ek[ /孏+⯯!&z7_W6 mP^U߻Mjf|lbxz1 -ل_[݌<ƎpsSLRތY}H; yƲނh"l7}d=L)Q$F`mn{pFW{J^:1nS4QhJ/!Y+а&HCX4)FF<֜fNX$\/0VhE2ϱh=Ei@4Z2-"q)ThoڲtMSL'"1d2C+t(ER+J݊ ŅqFl(.NKDi,P4ˎ7owlT&<ۊ`U]ubI7֘\mIH14 '"ꇈgȭxl|7筈ۘI(1|sE }BJ ļ<4UO}|-* L#vB%NB_.C|fcgd(ܗeFH[n%}C>1b>rPG,$ZMTKU Ybf}Q5M&r`($")nfWrDNKVɴ:Ӻ[;%:>\R:[T#DJ1B;ޯ\ml1k^g0DjcPUũz{N;ji^lLA?9OOr8xbpMS9~&яcMRvcFk# \]} V+!-ۻ,n\.Wbjn,Y}U}^q9n| j%[S$ \5R0.,}982N@Z'˥4F7N8ň,J#Vv} Ywnyf (@:Է?R @ 0\Lyw2*#ρ"嬤mћ*k#\R9&PVˍg>ĝ̖QA?\7ResosǴYwJK R|z?/2+EҕJ|tx*LJZ 'ꋈ {;~u^JEN[ Ze)gH#81xd6iJhw#e}+-,)3xcXD8OPB;>i ̆. ڂרHҘ̀"D(Dy1H9x"ls WHXƵ8NxJ4-X)5kΪ,V_SF9֋fB*甲*?N;jXϧ 7oc2f֏՜;>?aFqCA24&argU66VŚګ j6S.'pmP#k@ ̫eI2\u^zJ멏y^^[[m; ZJQ)goէG IX'V <s"TsܣiZ܅;\=:YTiŨD5Q}2'oq7k8zBF,߭~IsMuQn_KaE8A 1q^N|I]/S~Z6g <_-v .tN7Z;=гYsi /oLcץyGF8e›s?]Qg|@!\޼6YeP6۫OEsf9`?r̪@\o.onnn|,Lr9eS #_ZW#h̼_ׯ˹d Z.Yn);&3|s4ۿN&Ʈ-[MKA2MGq9(0Jy),p%=!hIdWܱl>c}FJ&3eRҚ "-sDml4!N&5M '?גXb#wE_1[q1~Dψ:ldzo) W1"#YECVcLM"/"3ay %\ǃIO*+nmQG>]m!(@! i HS&%PRT*ʠ%p >: t\g BfcMZbK|@650|#i83 m.yk Ins;\I Hwaa}<БFI+!E(2iDq E"Ke{t٣Cnb˻+%OlqL⑐uʘOw#;K'ŢFaxH K1$$=h*>7`A6J;eBp!z^"RJE[E%m:uBI,G͜皇(ER—+dRutm&t́rv V)BqN{|_ěDohmqm8Np||"9p W$V.G+2^;뺮x2]=|uUQoongҤGӊcxu~mU㳯eJO=En: v~ՐY8|d=ÈKHbbVhb+k7ᆳҝXBG2nXxQ~րūjz3|u%˵!~*& "h]M2$wpyԲvt>4i2C48ɛ1_fW>z1\0RK.~}s]~rxUG5*q7zC?xJv_[2Z Z24/w,Ƴ;^hoKjHlO* ਄ `0J*M8UYޣtؑ˒wm=nqؼU[d!@A˭u=E{G$ܧ[=/ZsHU_")!&ټ4mӶ' 7ì>$mԿ;o_v3/ߩ-:EAl bg<'P"0+#X !yC&q+<\у{sA%/Zۺ?'K2TmZ)۾2ڤɋ1SHU!=́Ȕt:[CS"sZR4*)gr:@k!9[㨹,\ lXVQSU{_ΒFaP ~WgdYv_R4zq+SɊXn#VgGnNٻ?~Lh7}6|'C~tp@B]:'Klca%(a ?{wt9ȌDZ1> ab..1+[ ^o@&1}b EyM:g;.פ i:mZJV+| 1m.D!U'/uqp:\2LF2ﲐ@c"SA# JE\{,/RYrq[4b]&! 
v_/N @I7AH p).YN6t($k`5Y O\^o‡@prFAqf1gu51**iB@ s sǎ;>vj$ڻ!`-@Yw'TRgo4d}(+UGFGe)`F" "# |8\^n,8 lKP3ւ9)$lfBJtECV99ed4hdg ɕ kze<`=8s<1iuz7K"!qtǢk/ʋRU:JPjտ1R[ ATԟ[ ]hT zS!;RѦ&s,dD 1aΑH&$1<PX`D2:}²)<ę֠Lqik|XgAAUM4Z3fsu)[Y2DcAv>TL@A h6<-!t hEUgt.[a*$AJ,”"뚃S#PSgtvrѳkE΁΀bg=H%jBͦ,WBtUIp>!xG0 =L,mNX18giS]Ĕ5jMDbUTUMA:oT#ҏTr9?CJ,t7䟿79_㜿Awv3{p"r5%'tx:9]N 99t:h_sգD3,#~uf8o6hñ ϽoE&=F˦"O)a 7ov'=pMWzO}5~#n+ɼxdWl^,/^ h?^3or!eB#>pSmkԝgȶ;AЋ8os[ }N \6eQt)2ցXuxs9Vep{ہMrدwz1N_ǃA}Xw\, ߏ*bAY;$ymE!N cr2sBYU@X*kH#Q_ ,Q\=7q!Sfj)+@I98+&gu1S#AX_ct{ap0lPAAsnG"5푚#Sv1Houm3v_r9/l} ]u1VNK#cʑ$<"|6%IJSS[)LV!sv -"R(B]ZN5|Zze3ݰ T~^;4J& CVW([?+0Q<lgl369f;k-v-,*$&}}Ӈ  tv8Xr> `fk}H;į[xV@H,Uebx`u\1.)&BBM6Y[@<z<6ng ^1e]5RQt w|(oWONχ8IkgSyGBꊍ)8s҄ݫ Wafj$SL!l0;5\Q:kʔiFl9*1Hh{) 1Y#90+q9$QWQ)tL10h-i>|?f)=(exߒ<|YzF9c::Ǯv }R0-gcE#&=A=(Pn0UrΎz.w9 gd TCWon1 V[uB(*WEp&m0g49M%('n\&~Jg׊sZ:M,ik`;)LrIu|ץm}Z-R4B^7a-oWBE"b. rVhzoѶOt[(u;9;Wz|{-%oTA^",3[dV$gSgm@>k_{79C+L;rU%`W*\?g+ztqC]0.cVvYI˿ܿo> 'ߡY>0ݽsy^g4oztL\Ͼ~uknޜyTI.ڗZw*vdy&|ky7j-!zwdz*kZ!|,R+F7>\v|%mkgƷ4Ok;DgWW;u\(З tmn;«ʍ^m. goćRm?t\ ׆IWHim0`R;5RyD`ڻ΁*wk,zMdze30k (&.Nqi[S*5X6z]r SɂZqQhŨRXy!#ZS4p9@X\ulR%T/n@D""A0D]N Dj"GZʬ17+FC҉WAQ9+(1ǜ*A@Wō7 YzO|GZuٚlc>'ŬEGa~خ,}龤žl [ZW'jUZ9B.f29). "Pؗ =9EGo;Cȏ0V'y֞/nvp^vst*5|#|]yObYc.>]W?77ӶSl r C-=UCkIjnBǀ1bkF1kH 47+5sP !&jτjv|!6)ROHF޼#le T*]NUًeTCj],IumcF|4yƎ$5"eȷPKlńN+XW[ VE)11!Ve3BP[%׎ ߙeψćF>#> Z08~oU |lR;sv0j9e,+>:5c+fdPteUl(8ިZOU] dkWLPJy&P Vt/xbώW>j&4 C_ixjuѫa]ZHXtO}B!jh rBg)IJX1PA{(` {1ێƴ}^;_Ee.ˎJ벃H^TbN|%,<>BGTKBklꪢDbͲ#y;C<jIqFvIky1f4笣*Y v0oz!Ў }yFh𢽄eҧN~4D L5t:V׈9[H5Fc#᱑H V2#0 *H Bvw>:k U&#B0t j3QavCȵ,󍊹~hZ{U֫(?¼76j$H~s7TpN7 k$ڍ)FIWrln4&%J(IխԪ  ZSѸK.ѼhoDI-uoSMkʚ缤.+\3ۊ+G RDFݾz)40ƀNJuvH1l\!ߏ/&k0ǦN΍o:e6m6bK3ˌJ3Fxym↚\NN^$x讅3ƱنڱOh-9 c in;E Fucɉ)TG-$\⫵.zܝs%OΦv9Ly}Ċ[S2Dp"q$j3dCH8kUkNﭤf5M +8aB Aяځ$6}7ǒ e$݅4!Oil׳B03#GcЇ6MN^եM쨎 6Й@ hյ.0ž}sto}PneB8:wX"֑i3ӴbdR+&kEzA`kr(pxAJf@Ov$T=GnsONO %Tih$(=5 -w͈rG6|9Okh|͘&Ռ8W3=o\[)KJ=c`ް;LIur*UF!]r1';U3(Q)D>yCXHIqZANh(A=b<W06T4xndݠd =w4bYM$$AI1& 咚H@rID\Ol;6-_M'WɪIJJ#{ 1ﶭwEԺ٥(.ia'/!WC#mLWif5D PGbl` -Lz'-.)4"zbP+D="zE۷R;0E-89Z9dj]{ ?z}TG`F(Rl ~5D#)jףpŗV@?v\\k{nA_-6"x\ rG)Zro/y9vSiX2Jض%%smTzU8XW0NɎƈM8uO?J4xUۿw/O*5O=^W|-%&7h]^7Tg9/tfzVCx{C6TBcwUv| mgCz#1>bBoIj!퐋6aF=}'b=? ]RrݙQDeVdYr߷ΥIQy=֠J- NƬ)٥d;  =Q@Į 9ӛ}-ljchrVJͅ;i#%cᣟPcq\[-T vFLcAIGAsx>OηBr`݉QqRM ':thhu:/Von*܌"Ia|=of,y0~^j/ӯ̇˫}yhNިa;/71zY/%adiWfv1=sdfyY#-k}"5sfݗrf*~ Z-7> ~O!bICɭ"`jkd3RTIsK|#^ F^.Tm,pNC5A-Sl},nŏHH&WX3ǔkπQ1wN>sMwg`[eA]z|I* V&ye ,z7g2faJ1 BA+´ e*G$ɋb)ViEzEh=XF>Ұ{Y("eF2CFO:Y/fZ`lblXLÞ  חB}ً[#R;6i5867 lœmuq&,edr!ą;޺F^ {wxZdK6M" AoVV_ HSdTS5߰\^h?18{A:8Ζxi?/nK=a), EHb'1E\n, :p`ܨDuᔷ>.w/CȲЕ`D0Fjzc_9'45yebF5賥ji P~PU9l0.\.pIJPDJl[ϒC#i)y.jb'p!C Y7W@-б%`k]T3IPb4ȧ`CfW eM&6O"6q`L K`nYNbCs0(O%5JCjYhdEQ>5-Fo\z)7/כF:?AYq x#?XeIV΄F$vKL/Pwz%Fݳ>r< KDDm^cs@5&nآ,ɣ%n^{O{S)T7)7UU?/{"N$`%M@JS`$+C (‰ .}QINɄD~ tBOetd-7$hC 1RZb d= K ('j{9S>h-H6MHz'6ߟLiEkH1"ɀDB@mIݛbGmr6E/mvENl-xj(4[S?J˜ KKryY"jo[?pҝ\NZyKLcC/ (HX_LRid// .@|ҟ<~' VfQL/N.IF5xÞ)AXksf$ӈ5bu;>,yvak^*Eg 5PŶAi?"vL}/5_|/GNl-Mac{(>[|ToϿ~?+`AHԩo3X6`4޲Cd+e'a_DӯG';DR,NyfϪydzYdK 5VM»NitA\ыwUPj5 V An&E6`BA7rkm4"(X,{OskF1J*.Z 'ZX㍬qm24{»,h@UIiw7nR)g6ة5cqh@+i)\=jwcvE [j^|)dhY2rw(ƪٷ^+s@ixVy@P桅DysZXeKi3¿]NHΐfƁ$`~S|M<|Mp"%SD{DВ,1Wɝq)5)d-&ucɉ)TG-ˍY|5Ǭ\ɓ5]3Dk9RgF%ք `]vĨL$WI@]*Û"0{wtO('GrtRͮY}#]Oʍ_<8;l|0,z?}gUMs74+% kЎf=w r:tеǖ%Q_ f \˾b0()I8i$W鍓:ӗΡDHQdrXRWד5zPTN+cI +6iN<|OokU}>nq0kn+u->ɒ4ԟĿhe{O1\d9Q!*)'z%O?s6*|WjuuEs2'#͞قS/f.6k-Y%nǼ%iV{Wb{HxDxrcsyQ2ΗyU&bQKu62D18uWSk&B*"SDMyl}$Q'~cqb@Y2W'uJ(u=KD#1-lf`o_v̙qgre; qIvQy\Wb"8EaWy3:)m}R-: &UY_[^RYF!+I -z;hmg@~7^逴yO&0Yu tBFx(&(&_! 